repo_name | path | copies | size | content | license
---|---|---|---|---|---
GeraldLoeffler/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/widgets.py | 69 | 40833 | """
GUI Neutral widgets
All of these widgets require you to predefine an Axes instance and
pass that as the first arg. matplotlib doesn't try to be too smart in
layout -- you have to figure out how wide and tall you want your Axes
to be to accommodate your widget.
"""
import numpy as np
from mlab import dist
from patches import Circle, Rectangle
from lines import Line2D
from transforms import blended_transform_factory
class LockDraw:
"""
some widgets, like the cursor, draw onto the canvas, and this is not
desirable under all circumstances, like when the toolbar is in
zoom-to-rect mode and drawing a rectangle. The module level "lock"
allows someone to grab the lock and prevent other widgets from
drawing. Use matplotlib.widgets.lock(someobj) to prevent other
widgets from drawing while someobj holds the lock.
"""
def __init__(self):
self._owner = None
def __call__(self, o):
'reserve the lock for o'
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
'release the lock'
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
'drawing is available to o'
return not self.locked() or self.isowner(o)
def isowner(self, o):
'o owns the lock'
return self._owner is o
def locked(self):
'the lock is held'
return self._owner is not None
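# Editor's note: the short sketch below is illustrative only and not part of
# the original module; the helper name is hypothetical. It shows how the lock
# is meant to be used -- in practice widgets check the canvas-level instance,
# e.g. canvas.widgetlock, before drawing.
def _lockdraw_example():
    lock = LockDraw()
    cursor = object()                # stand-in for a widget instance
    lock(cursor)                     # reserve the lock for `cursor`
    print(lock.available(cursor))    # True: the owner may still draw
    print(lock.available(object()))  # False: everyone else is blocked
    lock.release(cursor)
    print(lock.locked())             # False: the lock is free again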
class Widget:
"""
OK, I couldn't resist; abstract base class for mpl GUI neutral
widgets
"""
drawon = True
eventson = True
class Button(Widget):
"""
A GUI neutral button
The following attributes are accessible
ax - the Axes the button renders into
label - a text.Text instance
color - the color of the button when not hovering
hovercolor - the color of the button when hovering
Call "on_clicked" to connect to the button
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
ax is the Axes instance the button will be placed into
label is a string which is the button text
image if not None, is an image to place in the button -- can
be any legal arg to imshow (numpy array, matplotlib Image
instance, or PIL image)
color is the color of the button when not activated
hovercolor is the color of the button when the mouse is over
it
"""
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.ax = ax
ax.figure.canvas.mpl_connect('button_press_event', self._click)
ax.figure.canvas.mpl_connect('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_axis_bgcolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if event.inaxes != self.ax: return
if not self.eventson: return
for cid, func in self.observers.items():
func(event)
def _motion(self, event):
if event.inaxes==self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_axis_bgcolor(c)
self._lastcolor = c
if self.drawon: self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this func with event
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
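# Editor's note: a minimal usage sketch for Button (illustrative only, not part
# of the original module; the helper name is hypothetical). It assumes the
# standard matplotlib.pyplot API.
def _button_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    # the button gets its own Axes: [left, bottom, width, height] in figure coords
    bax = fig.add_axes([0.4, 0.45, 0.2, 0.1])
    button = Button(bax, 'Press me', color='0.85', hovercolor='0.95')
    def on_press(event):
        print('button pressed')
    button.on_clicked(on_press)
    plt.show()
    return button  # keep a reference so the callbacks stay alive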
class Slider(Widget):
"""
A slider representing a floating point range
The following attributes are defined
ax : the slider axes.Axes instance
val : the current slider value
vline : a Line2D instance representing the initial value
poly : A patch.Polygon instance which is the slider
valfmt : the format string for formatting the slider text
label : a text.Text instance, the slider label
closedmin : whether the slider is closed on the minimum
closedmax : whether the slider is closed on the maximum
slidermin : another slider - if not None, this slider must be > slidermin
slidermax : another slider - if not None, this slider must be < slidermax
dragging : allow for mouse dragging on slider
Call on_changed to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None, slidermax=None,
dragging=True, **kwargs):
"""
Create a slider from valmin to valmax in axes ax;
valinit - the slider initial position
label - the slider label
valfmt - used to format the slider value
closedmin and closedmax - indicate whether the slider interval is closed
slidermin and slidermax - can be used to constrain the value of
this slider to the values of other sliders.
additional kwargs are passed on to self.poly which is the
matplotlib.patches.Rectangle which draws the slider. See the
matplotlib.patches.Rectangle documentation for legal property
names (eg facecolor, edgecolor, alpha, ...)
"""
self.ax = ax
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin,valinit,0,1, **kwargs)
self.vline = ax.axvline(valinit,0,1, color='r', lw=1)
self.valfmt=valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
ax.figure.canvas.mpl_connect('button_press_event', self._update)
if dragging:
ax.figure.canvas.mpl_connect('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt%valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
def _update(self, event):
'update the slider position'
if event.button !=1: return
if event.inaxes != self.ax: return
val = event.xdata
if not self.closedmin and val<=self.valmin: return
if not self.closedmax and val>=self.valmax: return
if self.slidermin is not None:
if val<=self.slidermin.val: return
if self.slidermax is not None:
if val>=self.slidermax.val: return
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[-1] = val, 0
xy[-2] = val, 1
self.poly.xy = xy
self.valtext.set_text(self.valfmt%val)
if self.drawon: self.ax.figure.canvas.draw()
self.val = val
if not self.eventson: return
for cid, func in self.observers.items():
func(val)
def on_changed(self, func):
"""
When the slider value is changed, call this func with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
def reset(self):
"reset the slider to the initial value if needed"
if (self.val != self.valinit):
self.set_val(self.valinit)
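# Editor's note: a minimal usage sketch for Slider (illustrative only, not part
# of the original module; the helper name is hypothetical). It assumes the
# standard matplotlib.pyplot API.
def _slider_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_axes([0.1, 0.35, 0.8, 0.55])
    t = np.arange(0.0, 1.0, 0.001)
    line, = ax.plot(t, np.sin(2 * np.pi * 3.0 * t))
    sax = fig.add_axes([0.1, 0.15, 0.8, 0.05])
    sfreq = Slider(sax, 'freq', 0.1, 10.0, valinit=3.0)
    def update(val):
        # redraw the curve with the frequency taken from the slider
        line.set_ydata(np.sin(2 * np.pi * sfreq.val * t))
        fig.canvas.draw()
    sfreq.on_changed(update)
    plt.show()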
class CheckButtons(Widget):
"""
A GUI neutral set of check buttons
The following attributes are exposed
ax - the Axes instance the buttons are in
labels - a list of text.Text instances
lines - a list of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have set_visible(False) when
box is not checked
rectangles - a list of patch.Rectangle instances
Connect to the CheckButtons with the on_clicked method
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
actives is a len(buttons) list of booleans indicating whether
the button is active
"""
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels)>1:
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color':'k', 'linewidth':1.25, 'transform':ax.transAxes,
'solid_capstyle':'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy/2., dy/2.
x, y = 0.05, y-h/2.
p = Rectangle(xy=(x,y), width=w, height=h,
facecolor=axcolor,
transform=ax.transAxes)
l1 = Line2D([x, x+w], [y+h, y], **lineparams)
l2 = Line2D([x, x+w], [y, y+h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1,l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
for p,t,lines in zip(self.rectangles, self.labels, self.lines):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y) ):
l1, l2 = lines
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
thist = t
break
else:
return
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
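# Editor's note: a minimal usage sketch for CheckButtons (illustrative only, not
# part of the original module; the helper name is hypothetical). It assumes the
# standard matplotlib.pyplot API.
def _check_buttons_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    rax = fig.add_axes([0.35, 0.4, 0.3, 0.25])
    check = CheckButtons(rax, ('red', 'green', 'blue'), (True, False, False))
    def on_click(label):
        print('toggled ' + label)
    check.on_clicked(on_click)
    plt.show()
    return check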
class RadioButtons(Widget):
"""
A GUI neutral set of radio buttons
The following attributes are exposed
ax - the Axes instance the buttons are in
activecolor - the color of the button when clicked
labels - a list of text.Text instances
circles - a list of patch.Circle instances
Connect to the RadioButtons with the on_clicked method
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
active is the index into labels for the button that is active
activecolor is the color of the button when clicked
"""
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt==active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p,t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else: return
for p in self.circles:
if p==inp: color = self.activecolor
else: color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
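# Editor's note: a minimal usage sketch for RadioButtons (illustrative only, not
# part of the original module; the helper name is hypothetical). It assumes the
# standard matplotlib.pyplot API.
def _radio_buttons_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    rax = fig.add_axes([0.35, 0.4, 0.3, 0.25])
    radio = RadioButtons(rax, ('2 Hz', '4 Hz', '8 Hz'), active=0)
    def on_click(label):
        print('selected ' + label)
    radio.on_clicked(on_click)
    plt.show()
    return radio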
class SubplotTool(Widget):
"""
A tool to adjust the subplot params of a matplotlib figure
"""
def __init__(self, targetfig, toolfig):
"""
targetfig is the figure to adjust
toolfig is the figure to embed the subplot tool into. If
None, a default pylab figure will be created. If you are
using this from the GUI, create the toolfig yourself and pass it in.
"""
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s'%(self.slider.label.get_text(), self.slider.valfmt)
return fmt%x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left', 0, 1, targetfig.subplotpars.left, closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom, 'bottom', 0, 1, targetfig.subplotpars.bottom, closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1, targetfig.subplotpars.right, closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1, targetfig.subplotpars.top, closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace', 0, 1, targetfig.subplotpars.wspace, closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace', 0, 1, targetfig.subplotpars.hspace, closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon: self.targetfig.canvas.draw()
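# Editor's note: a minimal usage sketch for SubplotTool (illustrative only, not
# part of the original module; the helper name is hypothetical). Note that a
# toolfig is created and passed in explicitly here; the pyplot helper
# subplot_tool, where available, does this for you.
def _subplot_tool_example():
    import matplotlib.pyplot as plt
    targetfig = plt.figure()
    ax = targetfig.add_subplot(111)
    ax.plot(np.arange(10), np.arange(10) ** 2)
    toolfig = plt.figure(figsize=(6, 3))
    tool = SubplotTool(targetfig, toolfig)
    plt.show()
    return tool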
class Cursor:
"""
A horizontal and vertical line that span the axes and move with
the pointer. You can turn off the horizontal or vertical line
respectively with the attributes
horizOn =True|False: controls visibility of the horizontal line
vertOn =True|False: controls visibility of the vertical line
And the visibility of the cursor itself with the visible attribute
"""
def __init__(self, ax, useblit=False, **lineprops):
"""
Add a cursor to ax. If useblit=True, use the backend
dependent blitting features for faster updates (GTKAgg only
now). lineprops is a dictionary of line properties. See
examples/widgets/cursor.py.
"""
self.ax = ax
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
self.visible = True
self.horizOn = True
self.vertOn = True
self.useblit = useblit
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
'on mouse motion draw the cursor if visible'
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible: return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
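# Editor's note: a minimal usage sketch for Cursor (illustrative only, not part
# of the original module; the helper name is hypothetical). It assumes the
# standard matplotlib.pyplot API.
def _cursor_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(np.random.rand(10), np.random.rand(10), 'o')
    # lineprops (here color and linewidth) are forwarded to the crosshair lines;
    # useblit=True speeds up redraws on backends that support blitting
    cursor = Cursor(ax, useblit=True, color='red', linewidth=1)
    plt.show()
    return cursor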
class MultiCursor:
"""
Provide a vertical line cursor shared between multiple axes
from matplotlib.widgets import MultiCursor
from pylab import figure, show, nx
t = nx.arange(0.0, 2.0, 0.01)
s1 = nx.sin(2*nx.pi*t)
s2 = nx.sin(4*nx.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)
show()
"""
def __init__(self, canvas, axes, useblit=True, **lineprops):
self.canvas = canvas
self.axes = axes
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5*(xmin+xmax)
self.lines = [ax.axvline(xmid, visible=False, **lineprops) for ax in axes]
self.visible = True
self.useblit = useblit
self.background = None
self.needclear = False
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox)
for line in self.lines: line.set_visible(False)
def onmove(self, event):
if event.inaxes is None: return
if not self.canvas.widgetlock.available(self): return
self.needclear = True
if not self.visible: return
for line in self.lines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for ax, line in zip(self.axes, self.lines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector:
"""
Select a min/max range of the x or y axes for a matplotlib Axes
Example usage:
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
onmove_callback is an optional callback that will be called on mouse move
with the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None):
"""
Create a span selector in ax. When a selection is made, clear
the span and call onselect with the selection limits:
onselect(vmin, vmax)
direction must be 'horizontal' or 'vertical'
If minspan is not None, ignore events smaller than minspan
The span rect is drawn with rectprops; default
rectprops = dict(facecolor='red', alpha=0.5)
set the visible attribute to False if you want to turn off
the functionality of the span selector
"""
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.ax = None
self.canvas = None
self.visible = True
self.cids=[]
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.useblit = useblit
self.minspan = minspan
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
self.new_axes(ax)
def new_axes(self,ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
self.canvas = ax.figure.canvas
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
self.cids.append(self.canvas.mpl_connect('button_press_event', self.press))
self.cids.append(self.canvas.mpl_connect('button_release_event', self.release))
self.cids.append(self.canvas.mpl_connect('draw_event', self.update_background))
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData, self.ax.transAxes)
w,h = 0,1
else:
trans = blended_transform_factory(self.ax.transAxes, self.ax.transData)
w,h = 1,0
self.rect = Rectangle( (0,0), w, h,
transform=trans,
visible=False,
**self.rectprops
)
if not self.useblit: self.ax.add_patch(self.rect)
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
return event.inaxes!=self.ax or not self.visible or event.button !=1
def press(self, event):
'on button press event'
if self.ignore(event): return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
'on button release event'
if self.pressv is None or (self.ignore(event) and not self.buttonDown): return
self.buttonDown = False
self.rect.set_visible(False)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span<self.minspan: return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressv is None or self.ignore(event): return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv>maxv: minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv-minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv-minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
# For backwards compatibility only!
class HorizontalSpanSelector(SpanSelector):
def __init__(self, ax, onselect, **kwargs):
import warnings
warnings.warn('Use SpanSelector instead!', DeprecationWarning)
SpanSelector.__init__(self, ax, onselect, 'horizontal', **kwargs)
class RectangleSelector:
"""
Select a rectangular region of a matplotlib Axes
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
fig = figure()
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data'):
"""
Create a selector in ax. When a selection is made, clear
the span and call onselect with
onselect(pos_1, pos_2)
and clear the drawn box/line. The pos_i are arrays of length 2
containing the x- and y-coordinates.
If minspanx is not None then events smaller than minspanx
in the x direction are ignored (the same applies to y with minspany).
The rect is drawn with rectprops; default
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with lineprops; default
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
Use drawtype if you want the mouse to draw a line, a box or nothing
between click and the actual position, by setting
drawtype='line', drawtype='box' or drawtype='none'.
spancoords is one of 'data' or 'pixels'. If 'data', minspanx
and minspany will be interpreted in the same coordinates as
the x and y axis; if 'pixels', they are in pixels
"""
self.ax = ax
self.visible = True
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0,0), 0, 1,visible=False,**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0,0],[0,0],visible=False,**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit
self.minspanx = minspanx
self.minspany = minspany
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
# If RectangleSelector is not active :
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress is None:
return event.inaxes!= self.ax
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes!=self.ax or
event.button != self.eventpress.button)
def press(self, event):
'on button press event'
# Is the correct button pressed within the correct axes?
if self.ignore(event): return
# make the drawn box/line visible and record the click
# coordinates, button, ...
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
'on button release event'
if self.eventpress is None or self.ignore(event): return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords=='data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line get values in the right
# order
elif self.spancoords=='pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin>xmax: xmin, xmax = xmax, xmin
if ymin>ymax: ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx<self.minspanx
yproblems = self.minspany is not None and spany<self.minspany
if (self.drawtype=='box') and (xproblems or yproblems):
    """Box too small"""    # the drawn box is too small
    return                 # in the x or y direction
if (self.drawtype=='line') and (xproblems and yproblems):
    """Line too small"""   # the drawn line is too small
    return                 # in both the x and y directions
self.onselect(self.eventpress, self.eventrelease)
# call desired function
self.eventpress = None # reset the variables to their
self.eventrelease = None # inital values
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event if box/line is wanted'
if self.eventpress is None or self.ignore(event): return
x,y = event.xdata, event.ydata # actual position (with
# button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx>maxx: minx, maxx = maxx, minx # get them in the right order
if miny>maxy: miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx-minx) # set width and height of box
self.to_draw.set_height(maxy-miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
""" Use this to activate / deactivate the RectangleSelector
from your program with a boolean variable 'active'.
"""
self.active = active
def get_active(self):
""" to get status of active mode (boolean variable)"""
return self.active
class Lasso(Widget):
def __init__(self, ax, xy, callback=None, useblit=True):
self.axes = ax
self.figure = ax.figure
self.canvas = self.figure.canvas
self.useblit = useblit
if useblit:
self.background = self.canvas.copy_from_bbox(self.axes.bbox)
x, y = xy
self.verts = [(x,y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.axes.add_line(self.line)
self.callback = callback
self.cids = []
self.cids.append(self.canvas.mpl_connect('button_release_event', self.onrelease))
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
def onrelease(self, event):
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts)>2:
self.callback(self.verts)
self.axes.lines.remove(self.line)
self.verts = None
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def onmove(self, event):
if self.verts is None: return
if event.inaxes != self.axes: return
if event.button!=1: return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.axes.draw_artist(self.line)
self.canvas.blit(self.axes.bbox)
else:
self.canvas.draw_idle()
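# Editor's note: a minimal usage sketch for Lasso (illustrative only, not part
# of the original module; the helper name is hypothetical). A Lasso is usually
# created inside a button-press handler, starting from the press coordinates,
# and the canvas widgetlock is held while it is active.
def _lasso_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(np.random.rand(20), np.random.rand(20), 'o')
    state = {'lasso': None}
    def callback(verts):
        print('lasso closed with %d vertices' % len(verts))
        fig.canvas.widgetlock.release(state['lasso'])
        state['lasso'] = None
    def onpress(event):
        if fig.canvas.widgetlock.locked() or event.inaxes is None:
            return
        state['lasso'] = Lasso(event.inaxes, (event.xdata, event.ydata), callback)
        fig.canvas.widgetlock(state['lasso'])  # block other widgets while lassoing
    fig.canvas.mpl_connect('button_press_event', onpress)
    plt.show()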
| agpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/window.py | 3 | 68121 | """
provide a generic structure to support window functions,
similar to how we have a Groupby object
"""
from __future__ import division
import warnings
import numpy as np
from collections import defaultdict
from datetime import timedelta
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex)
from pandas.core.dtypes.common import (
is_integer,
is_bool,
is_float_dtype,
is_integer_dtype,
needs_i8_conversion,
is_timedelta64_dtype,
is_list_like,
_ensure_float64,
is_scalar)
import pandas as pd
from pandas.core.base import (PandasObject, SelectionMixin,
GroupByMixin)
import pandas.core.common as com
import pandas._libs.window as _window
from pandas.tseries.offsets import DateOffset
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Substitution, Appender,
cache_readonly)
from pandas.core.generic import _shared_docs
from textwrap import dedent
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
same type as input
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
class _Window(PandasObject, SelectionMixin):
_attributes = ['window', 'min_periods', 'freq', 'center', 'win_type',
'axis', 'on', 'closed']
exclusions = set()
def __init__(self, obj, window=None, min_periods=None, freq=None,
center=False, win_type=None, axis=0, on=None, closed=None,
**kwargs):
if freq is not None:
warnings.warn("The freq kw is deprecated and will be removed in a "
"future version. You can resample prior to passing "
"to a window function", FutureWarning, stacklevel=3)
self.__dict__.update(kwargs)
self.blocks = []
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.freq = freq
self.center = center
self.win_type = win_type
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self):
return None
@property
def _on(self):
return None
@property
def is_freq_type(self):
return self.win_type == 'freq'
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not \
is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in \
['right', 'both', 'left', 'neither']:
raise ValueError("closed must be 'right', 'left', 'both' or "
"'neither'")
def _convert_freq(self, how=None):
""" resample according to the how, return a new object """
obj = self._selected_obj
index = None
if (self.freq is not None and
isinstance(obj, (ABCSeries, ABCDataFrame))):
if how is not None:
warnings.warn("The how kw argument is deprecated and will be "
"removed in a future version. You can resample prior "
"to passing to a window function", FutureWarning,
stacklevel=6)
obj = obj.resample(self.freq).aggregate(how or 'asfreq')
return obj, index
def _create_blocks(self, how):
""" split data into blocks & return conformed data """
obj, index = self._convert_freq(how)
if index is not None:
index = self._on
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]),
copy=False)
blocks = obj.as_blocks(copy=False).values()
return blocks, obj, index
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self):
return self.__class__.__name__
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self._window_type,
attrs=','.join(attrs))
def _get_index(self, index=None):
"""
Return index as ndarrays
Returns
-------
tuple of (index, index_as_ndarray)
"""
if self.is_freq_type:
if index is None:
index = self._on
return index, index.asi8
return index, index
def _prep_values(self, values=None, kill_inf=True, how=None):
if values is None:
values = getattr(self._selected_obj, 'values', self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = _ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError("ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(
action=self._window_type,
dtype=values.dtype))
else:
try:
values = _ensure_float64(values)
except (ValueError, TypeError):
raise TypeError("cannot handle this type -> {0}"
"".format(values.dtype))
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None):
""" wrap a single result """
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
result = pd.to_timedelta(
result.ravel(), unit='ns').values.reshape(result.shape)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj):
"""
wrap the results
Parameters
---------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
"""
from pandas import Series
from pandas.core.index import _ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None \
and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = _ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
if not len(final):
return obj.astype('float64')
return pd.concat(final, axis=1).reindex(columns=columns,
copy=False)
def _center_window(self, result, window):
""" center the result in the window """
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger than the number of "
"argument dimensions")
from pandas import Series, DataFrame
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (Series, DataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
return self.apply(arg, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs['sum'] = dedent("""
%(name)s sum
Parameters
----------
how : string, default None (DEPRECATED)
Method for down- or re-sampling""")
_shared_docs['mean'] = dedent("""
%(name)s mean
Parameters
----------
how : string, default None (DEPRECATED)
Method for down- or re-sampling""")
class Window(_Window):
"""
Provides rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If it is an offset then this will be the time period of each window. Each
window will be variable-sized, based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
this will default to 1.
freq : string or DateOffset object, optional (default None) (DEPRECATED)
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. See the notes below.
on : string, optional
For a DataFrame, column on which to calculate
the rolling window, rather than the index
closed : string, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
axis : int or string, default 0
Returns
-------
a Window or Rolling sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 1.0
2 2.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
....: index = [pd.Timestamp('20130101 09:00:00'),
....: pd.Timestamp('20130101 09:00:02'),
....: pd.Timestamp('20130101 09:00:03'),
....: pd.Timestamp('20130101 09:00:05'),
....: pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
"""
def validate(self):
super(Window, self).validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window < 0:
raise ValueError("window must be non-negative")
try:
import scipy.signal as sig
except ImportError:
raise ImportError('Please install scipy to generate window '
'weight')
if not isinstance(self.win_type, compat.string_types):
raise ValueError('Invalid win_type {0}'.format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError('Invalid win_type {0}'.format(self.win_type))
else:
raise ValueError('Invalid window {0}'.format(window))
def _prep_window(self, **kwargs):
"""
provide validation for our window type and return the window;
we have already been validated
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com._asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] + _pop_args(win_type,
arg_map[win_type],
kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, how=None, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : boolean, default True
If True computes weighted mean, else weighted sum
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return _window.roll_window(np.concatenate((arg,
additional_nans))
if center else arg, window, minp,
avg=mean)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
See also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply_window(mean=True, **kwargs)
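# Editor's note: a minimal usage sketch for a weighted (win_type) window
# (illustrative only, not part of the original module; the helper name is
# hypothetical). It assumes scipy is installed, since the weights come from
# scipy.signal.get_window.
def _win_type_example():
    s = pd.Series(np.arange(10, dtype='float64'))
    # unweighted (boxcar) moving mean over 3 observations
    print(s.rolling(3, win_type='boxcar').mean())
    # gaussian weights take their std at aggregation time
    print(s.rolling(3, win_type='gaussian').mean(std=1.0))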
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None) # noqa
groupby = kwargs.pop('groupby', None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super(GroupByMixin, self).__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch('count')
corr = GroupByMixin._dispatch('corr', other=None, pairwise=None)
cov = GroupByMixin._dispatch('cov', other=None, pairwise=None)
def _apply(self, func, name, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, compat.string_types):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
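# Editor's note: a minimal usage sketch of the groupby + rolling facilities
# provided through this mixin (illustrative only, not part of the original
# module; the helper name is hypothetical).
def _groupby_rolling_example():
    df = pd.DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'],
                       'val': [1., 2., 3., 4., 5., 6.]})
    # the rolling sum is computed independently within each group
    print(df.groupby('key')['val'].rolling(2).sum())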
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(self, func, name=None, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
name : string, optional
name of this function
window : int/array, default to _get_window()
center : boolean, default to self.center
check_minp : function, default to _use_window
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj, index = self._create_blocks(how=how)
index, indexi = self._get_index(index=index)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = _ensure_float64(arg)
return cfunc(arg,
window, minp, indexi, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(np.concatenate((x, additional_nans)),
window, min_periods=self.min_periods,
closed=self.closed)
else:
def calc(x):
return func(x, window, min_periods=self.min_periods,
closed=self.closed)
with np.errstate(all='ignore'):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
class _Rolling_and_Expanding(_Rolling):
_shared_docs['count'] = """%(name)s count of number of non-NaN
observations inside provided window."""
def count(self):
blocks, obj, index = self._create_blocks(how=None)
index, indexi = self._get_index(index=index)
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notnull().astype(int)
result = self._constructor(result, window=window, min_periods=0,
center=self.center,
closed=self.closed).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs['apply'] = dedent("""
%(name)s function apply
Parameters
----------
func : function
Must produce a single value from an ndarray input
\*args and \*\*kwargs are passed to the function""")
def apply(self, func, args=(), kwargs={}):
# TODO: _level is unused?
_level = kwargs.pop('_level', None) # noqa
window = self._get_window()
offset = _offset(window, self.center)
index, indexi = self._get_index()
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
return _window.roll_generic(arg, window, minp, indexi, closed,
offset, func, args, kwargs)
return self._apply(f, func, args=args, kwargs=kwargs,
center=False)
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply('roll_sum', 'sum', **kwargs)
_shared_docs['max'] = dedent("""
%(name)s maximum
Parameters
----------
how : string, default 'max' (DEPRECATED)
Method for down- or re-sampling""")
def max(self, how=None, *args, **kwargs):
nv.validate_window_func('max', args, kwargs)
if self.freq is not None and how is None:
how = 'max'
return self._apply('roll_max', 'max', how=how, **kwargs)
_shared_docs['min'] = dedent("""
%(name)s minimum
Parameters
----------
how : string, default 'min' (DEPRECATED)
Method for down- or re-sampling""")
def min(self, how=None, *args, **kwargs):
nv.validate_window_func('min', args, kwargs)
if self.freq is not None and how is None:
how = 'min'
return self._apply('roll_min', 'min', how=how, **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply('roll_mean', 'mean', **kwargs)
_shared_docs['median'] = dedent("""
%(name)s median
Parameters
----------
how : string, default 'median' (DEPRECATED)
Method for down- or re-sampling""")
def median(self, how=None, **kwargs):
if self.freq is not None and how is None:
how = 'median'
return self._apply('roll_median_c', 'median', how=how, **kwargs)
_shared_docs['std'] = dedent("""
%(name)s standard deviation
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func('std', args, kwargs)
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(_window.roll_var(arg, window, minp, indexi,
self.closed, ddof))
return self._apply(f, 'std', check_minp=_require_min_periods(1),
ddof=ddof, **kwargs)
_shared_docs['var'] = dedent("""
%(name)s variance
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func('var', args, kwargs)
return self._apply('roll_var', 'var',
check_minp=_require_min_periods(1), ddof=ddof,
**kwargs)
_shared_docs['skew'] = """Unbiased %(name)s skewness"""
def skew(self, **kwargs):
return self._apply('roll_skew', 'skew',
check_minp=_require_min_periods(3), **kwargs)
_shared_docs['kurt'] = """Unbiased %(name)s kurtosis"""
def kurt(self, **kwargs):
return self._apply('roll_kurt', 'kurt',
check_minp=_require_min_periods(4), **kwargs)
_shared_docs['quantile'] = dedent("""
%(name)s quantile
Parameters
----------
quantile : float
0 <= quantile <= 1""")
def quantile(self, quantile, **kwargs):
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
return _window.roll_quantile(arg, window, minp, indexi,
self.closed, quantile)
return self._apply(f, 'quantile', quantile=quantile,
**kwargs)
_shared_docs['cov'] = dedent("""
%(name)s sample covariance
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used
and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype('float64')
Y = Y.astype('float64')
mean = lambda x: x.rolling(window, self.min_periods,
center=self.center).mean(**kwargs)
count = (X + Y).rolling(window=window,
center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
_shared_docs['corr'] = dedent("""
%(name)s sample correlation
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations
will be used.""")
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
b = b.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
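# Illustration (not part of the original module): _get_cov above relies on
# the identity cov(X, Y) = (E[XY] - E[X] * E[Y]) * n / (n - ddof), evaluated
# with rolling means and counts. A direct check on one complete window
# (hypothetical helper, numpy only):
def _cov_identity_sketch(x, y, ddof=1):
    x = np.asarray(x, dtype='float64')
    y = np.asarray(y, dtype='float64')
    n = len(x)
    biased = (x * y).mean() - x.mean() * y.mean()
    return biased * n / (n - ddof)   # matches np.cov(x, y, ddof=ddof)[0, 1]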
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(self._on,
(ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex))
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif (isinstance(self.obj, ABCDataFrame) and
self.on in self.obj.columns):
return pd.Index(self.obj[self.on])
else:
raise ValueError("invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on))
def validate(self):
super(Rolling, self).validate()
# we allow rolling on a datetimelike index
if (self.is_datetimelike and
isinstance(self.window, (compat.string_types, DateOffset,
timedelta))):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError("center is not implemented "
"for datetimelike and offset "
"based windows")
# this will raise ValueError on non-fixed freqs
self.window = freq.nanos
self.win_type = 'freq'
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError("closed only implemented for datetimelike "
"and offset based windows")
def _validate_monotonic(self):
""" validate on is monotonic """
if not self._on.is_monotonic:
formatted = self.on or 'index'
raise ValueError("{0} must be "
"monotonic".format(formatted))
def _validate_freq(self):
""" validate & return our freq """
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError("passed window {0} in not "
"compat with a datetimelike "
"index".format(self.window))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
See also
--------
pandas.Series.rolling
pandas.DataFrame.rolling
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Rolling, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply('roll_count', 'count')
return super(Rolling, self).count()
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Rolling, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_rolling_func('sum', args, kwargs)
return super(Rolling, self).sum(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_rolling_func('max', args, kwargs)
return super(Rolling, self).max(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_rolling_func('min', args, kwargs)
return super(Rolling, self).min(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_rolling_func('mean', args, kwargs)
return super(Rolling, self).mean(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Rolling, self).median(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('std', args, kwargs)
return super(Rolling, self).std(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('var', args, kwargs)
return super(Rolling, self).var(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Rolling, self).skew(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Rolling, self).kurt(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Rolling, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Rolling, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Rolling, self).corr(other=other, pairwise=pairwise,
**kwargs)
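# Illustration (not part of the original module): Rolling.validate() above
# also accepts an offset string as the window when the index is datetimelike,
# converting it to nanoseconds and letting min_periods default to 1. A
# minimal sketch (hypothetical helper):
def _offset_window_sketch():
    idx = pd.date_range('2017-01-01', periods=5, freq='S')
    s = pd.Series(np.arange(5, dtype='float64'), index=idx)
    return s.rolling('2s').sum()   # 2-second, time-based window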
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provides a rolling groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level
"""
pass
class Expanding(_Rolling_and_Expanding):
"""
Provides expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None) (DEPRECATED)
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
axis : int or string, default 0
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_attributes = ['min_periods', 'freq', 'center', 'axis']
def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0,
**kwargs):
super(Expanding, self).__init__(obj=obj, min_periods=min_periods,
freq=freq, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
obj = self._selected_obj
if other is None:
return (max(len(obj), self.min_periods) if self.min_periods
else len(obj))
return (max((len(obj) + len(obj)), self.min_periods)
if self.min_periods else (len(obj) + len(obj)))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.expanding.aggregate
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Expanding, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self, **kwargs):
return super(Expanding, self).count(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Expanding, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_expanding_func('sum', args, kwargs)
return super(Expanding, self).sum(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_expanding_func('max', args, kwargs)
return super(Expanding, self).max(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_expanding_func('min', args, kwargs)
return super(Expanding, self).min(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_expanding_func('mean', args, kwargs)
return super(Expanding, self).mean(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Expanding, self).median(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('std', args, kwargs)
return super(Expanding, self).std(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('var', args, kwargs)
return super(Expanding, self).var(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Expanding, self).skew(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Expanding, self).kurt(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Expanding, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Expanding, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Expanding, self).corr(other=other, pairwise=pairwise,
**kwargs)
class ExpandingGroupby(_GroupByMixin, Expanding):
"""
Provides an expanding groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Expanding
_bias_template = """
Parameters
----------
bias : boolean, default False
Use a standard estimation bias correction
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations will
be used.
bias : boolean, default False
Use a standard estimation bias correction
"""
class EWM(_Rolling):
r"""
Provides exponential weighted functions
.. versionadded:: 0.18.0
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None (DEPRECATED)
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average)
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
When adjust is True (default), weighted averages are calculated using
weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of x and y used in
calculating the final weighted average of [x, None, y] are 1-alpha and 1
(if adjust is True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
"""
_attributes = ['com', 'min_periods', 'freq', 'adjust', 'ignore_na', 'axis']
def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False,
axis=0):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
self.min_periods = min_periods
self.freq = freq
self.adjust = adjust
self.ignore_na = ignore_na
self.axis = axis
self.on = None
@property
def _constructor(self):
return EWM
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(EWM, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _apply(self, func, how=None, **kwargs):
"""Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input argument
"""
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg):
return cfunc(arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods))
results.append(np.apply_along_axis(func, self.axis, values))
return self._wrap_results(results, blocks, obj)
@Substitution(name='ewm')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""exponential weighted moving average"""
nv.validate_window_func('mean', args, kwargs)
return self._apply('ewma', **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def std(self, bias=False, *args, **kwargs):
"""exponential weighted moving stddev"""
nv.validate_window_func('std', args, kwargs)
return _zsqrt(self.var(bias=bias, **kwargs))
vol = std
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def var(self, bias=False, *args, **kwargs):
"""exponential weighted moving variance"""
nv.validate_window_func('var', args, kwargs)
def f(arg):
return _window.ewmcov(arg, arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods),
int(bias))
return self._apply(f, **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
"""exponential weighted sample covariance"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_cov(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
cov = _window.ewmcov(X._prep_values(), Y._prep_values(), self.com,
int(self.adjust), int(self.ignore_na),
int(self.min_periods), int(bias))
return X._wrap_result(cov)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def corr(self, other=None, pairwise=None, **kwargs):
"""exponential weighted sample correlation"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_corr(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
def _cov(x, y):
return _window.ewmcov(x, y, self.com, int(self.adjust),
int(self.ignore_na),
int(self.min_periods),
1)
x_values = X._prep_values()
y_values = Y._prep_values()
with np.errstate(all='ignore'):
cov = _cov(x_values, y_values)
x_var = _cov(x_values, x_values)
y_var = _cov(y_values, y_values)
corr = cov / _zsqrt(x_var * y_var)
return X._wrap_result(corr)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
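# Illustration (not part of the original module): the recursions described in
# the EWM Notes above, written out for a plain Python sequence. The helper
# name is hypothetical and exists only for this sketch; NaN handling and
# min_periods are omitted.
def _ewma_sketch(values, alpha, adjust=True):
    out = []
    if adjust:
        # weights (1-alpha)**(n-1), ..., (1-alpha), 1 over the points seen
        num = den = 0.0
        for x in values:
            num = (1.0 - alpha) * num + x
            den = (1.0 - alpha) * den + 1.0
            out.append(num / den)
    else:
        # y[0] = x[0]; y[i] = (1-alpha) * y[i-1] + alpha * x[i]
        prev = None
        for x in values:
            prev = x if prev is None else (1.0 - alpha) * prev + alpha * x
            out.append(prev)
    return out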
# Helper Funcs
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
from pandas import Series, DataFrame
if not (isinstance(arg1, (np.ndarray, Series, DataFrame)) and
isinstance(arg2, (np.ndarray, Series, DataFrame))):
raise TypeError("arguments to moment function must be of type "
"np.ndarray/Series/DataFrame")
if (isinstance(arg1, (np.ndarray, Series)) and
isinstance(arg2, (np.ndarray, Series))):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, DataFrame):
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
if len(result.columns) > 0:
result.columns = frame_template.columns[result.columns]
return result
results = {}
if isinstance(arg2, DataFrame):
if pairwise is False:
if arg1 is arg2:
# special case in order to handle duplicate column names
for i, col in enumerate(arg1.columns):
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
return dataframe_from_int_dict(results, arg1)
else:
if not arg1.columns.is_unique:
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
with warnings.catch_warnings(record=True):
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
with warnings.catch_warnings(record=True):
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
return DataFrame(results, index=X.index,
columns=res_columns)
elif pairwise is True:
results = defaultdict(dict)
for i, k1 in enumerate(arg1.columns):
for j, k2 in enumerate(arg2.columns):
if j < i and arg2 is arg1:
# Symmetric case
results[i][j] = results[j][i]
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
arg2.iloc[:, j]))
# TODO: not the most efficient (perf-wise)
# though not bad code-wise
from pandas import Panel, MultiIndex, Index
with warnings.catch_warnings(record=True):
p = Panel.from_dict(results).swapaxes('items', 'major')
if len(p.major_axis) > 0:
p.major_axis = arg1.columns[p.major_axis]
if len(p.minor_axis) > 0:
p.minor_axis = arg2.columns[p.minor_axis]
if len(p.items):
result = pd.concat(
[p.iloc[i].T for i in range(len(p.items))],
keys=p.items)
else:
result = DataFrame(
index=MultiIndex(levels=[arg1.index, arg1.columns],
labels=[[], []]),
columns=arg2.columns,
dtype='float64')
# reset our index names to arg1 names
# reset our column names to arg2 names
# careful not to mutate the original names
result.columns = Index(result.columns).set_names(
arg2.columns.name)
result.index = result.index.set_names(
[arg1.index.name, arg1.columns.name])
return result
else:
raise ValueError("'pairwise' is not True/False")
else:
results = {}
for i, col in enumerate(arg1.columns):
results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
return dataframe_from_int_dict(results, arg1)
else:
return _flex_binary_moment(arg2, arg1, f)
def _get_center_of_mass(com, span, halflife, alpha):
valid_count = len([x for x in [com, span, halflife, alpha]
if x is not None])
if valid_count > 1:
raise ValueError("com, span, halflife, and alpha "
"are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if com is not None:
if com < 0:
raise ValueError("com must satisfy: com >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
com = (span - 1) / 2.
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
com = (1.0 - alpha) / alpha
else:
raise ValueError("Must pass one of com, span, halflife, or alpha")
return float(com)
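# Illustration (not part of the original module): every parameterization
# accepted above reduces to the single smoothing factor alpha = 1 / (1 + com).
# The helper name is hypothetical and simply mirrors the conversions done by
# _get_center_of_mass.
def _alpha_from_decay_sketch(com=None, span=None, halflife=None, alpha=None):
    if com is not None:
        return 1.0 / (1.0 + com)
    if span is not None:
        return 2.0 / (span + 1.0)              # since com = (span - 1) / 2
    if halflife is not None:
        return 1.0 - np.exp(np.log(0.5) / halflife)
    return alpha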
def _offset(window, center):
if not is_integer(window):
window = len(window)
offset = (window - 1) / 2. if center else 0
try:
return int(offset)
except:
return offset.astype(int)
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _zsqrt(x):
with np.errstate(all='ignore'):
result = np.sqrt(x)
mask = x < 0
from pandas import DataFrame
if isinstance(x, DataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
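# Illustration (not part of the original module): adding ``0 * other`` aligns
# both inputs on the union of their indexes and propagates each side's NaNs
# to the other, so the binary moment functions above only see matched
# samples. A minimal sketch (hypothetical helper):
def _prep_binary_sketch():
    s1 = pd.Series([1.0, 2.0, 3.0], index=['a', 'b', 'c'])
    s2 = pd.Series([10.0, np.nan], index=['b', 'c'])
    # X -> [NaN, 2.0, NaN], Y -> [NaN, 10.0, NaN]; only 'b' is a complete pair
    return _prep_binary(s1, s2)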
# Top-level exports
def rolling(obj, win_type=None, **kwds):
from pandas import Series, DataFrame
if not isinstance(obj, (Series, DataFrame)):
raise TypeError('invalid type: %s' % type(obj))
if win_type is not None:
return Window(obj, win_type=win_type, **kwds)
return Rolling(obj, **kwds)
rolling.__doc__ = Window.__doc__
def expanding(obj, **kwds):
from pandas import Series, DataFrame
if not isinstance(obj, (Series, DataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return Expanding(obj, **kwds)
expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
from pandas import Series, DataFrame
if not isinstance(obj, (Series, DataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return EWM(obj, **kwds)
ewm.__doc__ = EWM.__doc__
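# Illustration (not part of the original module): the three helpers above are
# thin wrappers around the classes defined in this file. A minimal usage
# sketch (hypothetical helper name), mirroring the equivalent Series methods:
def _top_level_helpers_sketch():
    s = pd.Series([0.0, 1.0, 2.0, np.nan, 4.0])
    r = rolling(s, window=2).sum()           # same as s.rolling(2).sum()
    e = expanding(s, min_periods=2).sum()    # same as s.expanding(2).sum()
    m = ewm(s, com=0.5).mean()               # same as s.ewm(com=0.5).mean()
    return r, e, m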
| mit |
waterponey/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
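# Illustration (not part of the original example): tokens of two or more word
# characters that start with a digit or underscore collapse to "#NUMBER",
# e.g.
#   number_aware_tokenizer("Intel 486DX2 vs 68040, circa 1993")
#   # -> ['Intel', '#NUMBER', 'vs', '#NUMBER', 'circa', '#NUMBER']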
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
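# Illustration (not part of the original example): bicluster_ncut returns a
# simplified normalized cut, cut(rows, cols) / weight(rows, cols), where
# weight is the total TF-IDF mass inside bicluster i and cut is the mass
# connecting its rows (columns) to columns (rows) outside it; lower values
# indicate tighter biclusters, which is why np.argsort below picks the best.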
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
farhaanbukhsh/sympy | sympy/external/importtools.py | 85 | 7294 | """Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, basestring):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
# Otherwise we don't know what this is. Hopefully
# it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
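# Illustration (not part of the original module): because the module-level
# flags defined at the top of this file override the keyword arguments, a
# test harness can silence the warnings globally, e.g.:
#
# import sympy.external.importtools as importtools
# importtools.WARN_NOT_INSTALLED = False
# importtools.WARN_OLD_VERSION = False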
| bsd-3-clause |
openworm/tracker-commons | src/Python/tests/diagnostic_test.py | 3 | 3036 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unit tests for the Python WCON parser
"""
import os
import sys
from six import StringIO # StringIO.StringIO in 2.x, io.StringIO in 3.x
import json
import jsonschema
import unittest
import filecmp
import glob
import collections
import pandas as pd
idx = pd.IndexSlice
import numpy as np
import time
sys.path.append('..')
from wcon import WCONWorms, MeasurementUnit
from wcon.measurement_unit import MeasurementUnitAtom
def timing_function():
"""
There's a better timing function available in Python 3.3+
Otherwise use the old one.
"""
if sys.version_info[0] >= 3 and sys.version_info[1] >= 3:
return time.monotonic()
else:
return time.time()
if __name__ == '__main__':
# def test_big_file():
print("BIG TEST: Test load and save and load and save")
files_to_test = [sys.argv[1]]
for JSON_path in files_to_test:
for pretty in [True]: # , False]:
print("LOADING FOR TEST: " + JSON_path +
" (PRETTY = " + str(pretty) + ")")
start_time = timing_function()
w1 = WCONWorms.load_from_file(JSON_path,
validate_against_schema=False)
print("Time to load w1: " + str(timing_function() - start_time))
# Save these worm tracks to a file, then load that file
test_path = 'test.wcon'
start_time = timing_function()
w1.save_to_file(test_path, pretty_print=pretty)
print("Time to save w1: " + str(timing_function() - start_time))
start_time = timing_function()
w2 = WCONWorms.load_from_file(test_path,
validate_against_schema=False)
print("Time to load w2: " + str(timing_function() - start_time))
# x1 = w1.data.loc[:, idx[0, 'x', 0]].fillna(0)
# x2 = w2.data.loc[:, idx[0, 'x', 0]].fillna(0)
# cmm = np.flatnonzero(x1 != x2)
# xx = pd.concat([x1, x2], axis=1)
# xx = xx.loc[cmm]
# Then load and save AGAIN and do a file comparison to make
# sure it's the same
# this will test that we order the keys (even though this
# isn't in the WCON standard it's nice for human
# readability, i.e. to have "units" before "data",
# "id" first in a data segment, etc.)
w3 = WCONWorms.load_from_file(test_path,
validate_against_schema=False)
assert(w2 == w3)
assert(w1 == w2)
assert(w1 == w3)
test_path2 = 'test2.wcon'
w3.save_to_file(test_path2, pretty_print=pretty)
# As described in the above comment: check that running
# load/save twice should generate an IDENTICAL file.
assert(filecmp.cmp(test_path, test_path2))
os.remove(test_path)
os.remove(test_path2)
# test_big_file()
| mit |
murrayrm/python-control | control/freqplot.py | 1 | 49536 | # freqplot.py - frequency domain plots for control systems
#
# Author: Richard M. Murray
# Date: 24 May 09
#
# This file contains some standard control system plots: Bode plots,
# Nyquist plots and pole-zero diagrams. The code for Nichols charts
# is in nichols.py.
#
# Copyright (c) 2010 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import warnings
from .ctrlutil import unwrap
from .bdalg import feedback
from .margins import stability_margins
from .exception import ControlMIMONotImplemented
from .statesp import StateSpace
from .xferfcn import TransferFunction
from . import config
__all__ = ['bode_plot', 'nyquist_plot', 'gangof4_plot',
'bode', 'nyquist', 'gangof4']
# Default values for module parameter variables
_freqplot_defaults = {
'freqplot.feature_periphery_decades': 1,
'freqplot.number_of_samples': 1000,
}
#
# Main plotting functions
#
# This section of the code contains the functions for generating
# frequency domain plots
#
#
# Bode plot
#
# Default values for Bode plot configuration variables
_bode_defaults = {
'bode.dB': False, # Plot gain in dB
'bode.deg': True, # Plot phase in degrees
'bode.Hz': False, # Plot frequency in Hertz
'bode.grid': True, # Turn on grid for gain and phase
'bode.wrap_phase': False, # Wrap the phase plot at a given value
}
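# Illustration (not part of the original module): these per-module defaults
# are read through config.defaults, so they can be changed globally before
# plotting, e.g.:
#
# import control
# control.config.defaults['bode.Hz'] = True     # label frequency in Hz
# control.config.defaults['bode.grid'] = False  # turn off the grid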
def bode_plot(syslist, omega=None,
plot=True, omega_limits=None, omega_num=None,
margins=None, method='best', *args, **kwargs):
"""Bode plot for a system
Plots a Bode plot for the system over an (optional) frequency range.
Parameters
----------
syslist : linsys
List of linear input/output systems (single system is OK)
omega : array_like
List of frequencies in rad/sec to be used for frequency response
dB : bool
If True, plot result in dB. Default is False.
Hz : bool
If True, plot frequency in Hz (omega must be provided in rad/sec).
Default value (False) set by config.defaults['bode.Hz']
deg : bool
If True, plot phase in degrees (else radians). Default value (True)
set by config.defaults['bode.deg']
plot : bool
If True (default), plot magnitude and phase
omega_limits : array_like of two values
Limits of the frequency vector to generate.
If Hz=True the limits are in Hz otherwise in rad/s.
omega_num : int
Number of samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
margins : bool
If True, plot gain and phase margin.
method : str
Method to use in computing margins (see :func:`stability_margins`)
*args : :func:`matplotlib.pyplot.plot` positional properties, optional
Additional arguments for `matplotlib` plots (color, linestyle, etc)
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
mag : ndarray (or list of ndarray if len(syslist) > 1))
magnitude
phase : ndarray (or list of ndarray if len(syslist) > 1))
phase in radians
omega : ndarray (or list of ndarray if len(syslist) > 1))
frequency in rad/sec
Other Parameters
----------------
grid : bool
If True, plot grid lines on gain and phase plots. Default is set by
`config.defaults['bode.grid']`.
initial_phase : float
Set the reference phase to use for the lowest frequency. If set, the
initial phase of the Bode plot will be set to the value closest to the
value specified. Units are in either degrees or radians, depending on
the `deg` parameter. Default is -180 if wrap_phase is False, 0 if
wrap_phase is True.
wrap_phase : bool or float
If wrap_phase is `False`, then the phase will be unwrapped so that it
is continuously increasing or decreasing. If wrap_phase is `True` the
phase will be restricted to the range [-180, 180) (or [:math:`-\\pi`,
:math:`\\pi`) radians). If `wrap_phase` is specified as a float, the
phase will be offset by 360 degrees if it falls below the specified
value. Default to `False`, set by config.defaults['bode.wrap_phase'].
The default values for Bode plot configuration parameters can be reset
using the `config.defaults` dictionary, with module name 'bode'.
Notes
-----
1. Alternatively, you may use the lower-level methods
:meth:`LTI.frequency_response`, ``sys(s)``, or ``sys(z)`` to
generate the frequency response for a single system.
2. If a discrete time model is given, the frequency response is plotted
along the upper branch of the unit circle, using the mapping ``z =
exp(1j * omega * dt)`` where `omega` ranges from 0 to `pi/dt` and `dt`
is the discrete timebase. If timebase not specified (``dt=True``),
`dt` is set to 1.
Examples
--------
>>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
>>> mag, phase, omega = bode(sys)
"""
# Make a copy of the kwargs dictionary since we will modify it
kwargs = dict(kwargs)
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
import warnings
warnings.warn("'Plot' keyword is deprecated in bode_plot; use 'plot'",
FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Get values for params (and pop from list to allow keyword use in plot)
dB = config._get_param('bode', 'dB', kwargs, _bode_defaults, pop=True)
deg = config._get_param('bode', 'deg', kwargs, _bode_defaults, pop=True)
Hz = config._get_param('bode', 'Hz', kwargs, _bode_defaults, pop=True)
grid = config._get_param('bode', 'grid', kwargs, _bode_defaults, pop=True)
plot = config._get_param('bode', 'grid', plot, True)
margins = config._get_param('bode', 'margins', margins, False)
wrap_phase = config._get_param(
'bode', 'wrap_phase', kwargs, _bode_defaults, pop=True)
initial_phase = config._get_param(
'bode', 'initial_phase', kwargs, None, pop=True)
# If argument was a singleton, turn it into a tuple
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
# Decide whether to go above Nyquist frequency
omega_range_given = True if omega is not None else False
if omega is None:
omega_num = config._get_param(
'freqplot', 'number_of_samples', omega_num)
if omega_limits is None:
# Select a default range if none is provided
omega = _default_frequency_range(syslist,
number_of_samples=omega_num)
else:
omega_range_given = True
omega_limits = np.asarray(omega_limits)
if len(omega_limits) != 2:
raise ValueError("len(omega_limits) must be 2")
if Hz:
omega_limits *= 2. * math.pi
omega = np.logspace(np.log10(omega_limits[0]),
np.log10(omega_limits[1]), num=omega_num,
endpoint=True)
if plot:
# Set up the axes with labels so that multiple calls to
# bode_plot will superimpose the data. This was implicit
# before matplotlib 2.1, but changed after that (See
# https://github.com/matplotlib/matplotlib/issues/9024).
# The code below should work on all cases.
# Get the current figure
if 'sisotool' in kwargs:
fig = kwargs['fig']
ax_mag = fig.axes[0]
ax_phase = fig.axes[2]
sisotool = kwargs['sisotool']
del kwargs['fig']
del kwargs['sisotool']
else:
fig = plt.gcf()
ax_mag = None
ax_phase = None
sisotool = False
# Get the current axes if they already exist
for ax in fig.axes:
if ax.get_label() == 'control-bode-magnitude':
ax_mag = ax
elif ax.get_label() == 'control-bode-phase':
ax_phase = ax
# If no axes present, create them from scratch
if ax_mag is None or ax_phase is None:
plt.clf()
ax_mag = plt.subplot(211, label='control-bode-magnitude')
ax_phase = plt.subplot(
212, label='control-bode-phase', sharex=ax_mag)
mags, phases, omegas, nyquistfrqs = [], [], [], []
for sys in syslist:
if not sys.issiso():
# TODO: Add MIMO bode plots.
raise ControlMIMONotImplemented(
"Bode is currently only implemented for SISO systems.")
else:
omega_sys = np.asarray(omega)
if sys.isdtime(strict=True):
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
else:
nyquistfrq = None
mag, phase, omega_sys = sys.frequency_response(omega_sys)
mag = np.atleast_1d(mag)
phase = np.atleast_1d(phase)
#
# Post-process the phase to handle initial value and wrapping
#
if initial_phase is None:
# Start phase in the range 0 to -360 w/ initial phase = -180
# If wrap_phase is true, use 0 instead (phase \in (-pi, pi])
initial_phase = -math.pi if wrap_phase is not True else 0
elif isinstance(initial_phase, (int, float)):
# Allow the user to override the default calculation
if deg:
initial_phase = initial_phase/180. * math.pi
else:
raise ValueError("initial_phase must be a number.")
# Shift the phase if needed
if abs(phase[0] - initial_phase) > math.pi:
phase -= 2*math.pi * \
round((phase[0] - initial_phase) / (2*math.pi))
# Phase wrapping
if wrap_phase is False:
phase = unwrap(phase) # unwrap the phase
elif wrap_phase is True:
pass # default calculation OK
elif isinstance(wrap_phase, (int, float)):
phase = unwrap(phase) # unwrap the phase first
if deg:
wrap_phase *= math.pi/180.
# Shift the phase if it is below the wrap_phase
phase += 2*math.pi * np.maximum(
0, np.ceil((wrap_phase - phase)/(2*math.pi)))
else:
raise ValueError("wrap_phase must be bool or float.")
mags.append(mag)
phases.append(phase)
omegas.append(omega_sys)
nyquistfrqs.append(nyquistfrq)
# Get the dimensions of the current axis, which we will divide up
# TODO: Not current implemented; just use subplot for now
if plot:
nyquistfrq_plot = None
if Hz:
omega_plot = omega_sys / (2. * math.pi)
if nyquistfrq:
nyquistfrq_plot = nyquistfrq / (2. * math.pi)
else:
omega_plot = omega_sys
if nyquistfrq:
nyquistfrq_plot = nyquistfrq
phase_plot = phase * 180. / math.pi if deg else phase
mag_plot = mag
if nyquistfrq_plot:
# append data for vertical nyquist freq indicator line.
# if this extra nyquist line is plotted in a single plot
# command then line order is preserved when creating a
# legend, e.g. legend(('sys1', 'sys2'))
omega_nyq_line = np.array((np.nan, nyquistfrq, nyquistfrq))
omega_plot = np.hstack((omega_plot, omega_nyq_line))
mag_nyq_line = np.array((
np.nan, 0.7*min(mag_plot), 1.3*max(mag_plot)))
mag_plot = np.hstack((mag_plot, mag_nyq_line))
phase_range = max(phase_plot) - min(phase_plot)
phase_nyq_line = np.array(
(np.nan,
min(phase_plot) - 0.2 * phase_range,
max(phase_plot) + 0.2 * phase_range))
phase_plot = np.hstack((phase_plot, phase_nyq_line))
#
# Magnitude plot
#
if dB:
ax_mag.semilogx(omega_plot, 20 * np.log10(mag_plot),
*args, **kwargs)
else:
ax_mag.loglog(omega_plot, mag_plot, *args, **kwargs)
# Add a grid to the plot + labeling
ax_mag.grid(grid and not margins, which='both')
ax_mag.set_ylabel("Magnitude (dB)" if dB else "Magnitude")
#
# Phase plot
#
# Plot the data
ax_phase.semilogx(omega_plot, phase_plot, *args, **kwargs)
# Show the phase and gain margins in the plot
if margins:
# Compute stability margins for the system
margin = stability_margins(sys, method=method)
gm, pm, Wcg, Wcp = (margin[i] for i in (0, 1, 3, 4))
# Figure out sign of the phase at the first gain crossing
# (needed if phase_wrap is True)
phase_at_cp = phases[0][(np.abs(omegas[0] - Wcp)).argmin()]
if phase_at_cp >= 0.:
phase_limit = 180.
else:
phase_limit = -180.
if Hz:
Wcg, Wcp = Wcg/(2*math.pi), Wcp/(2*math.pi)
# Draw lines at gain and phase limits
ax_mag.axhline(y=0 if dB else 1, color='k', linestyle=':',
zorder=-20)
ax_phase.axhline(y=phase_limit if deg else
math.radians(phase_limit),
color='k', linestyle=':', zorder=-20)
mag_ylim = ax_mag.get_ylim()
phase_ylim = ax_phase.get_ylim()
# Annotate the phase margin (if it exists)
if pm != float('inf') and Wcp != float('nan'):
if dB:
ax_mag.semilogx(
[Wcp, Wcp], [0., -1e5],
color='k', linestyle=':', zorder=-20)
else:
ax_mag.loglog(
[Wcp, Wcp], [1., 1e-8],
color='k', linestyle=':', zorder=-20)
if deg:
ax_phase.semilogx(
[Wcp, Wcp], [1e5, phase_limit + pm],
color='k', linestyle=':', zorder=-20)
ax_phase.semilogx(
[Wcp, Wcp], [phase_limit + pm, phase_limit],
color='k', zorder=-20)
else:
ax_phase.semilogx(
[Wcp, Wcp], [1e5, math.radians(phase_limit) +
math.radians(pm)],
color='k', linestyle=':', zorder=-20)
ax_phase.semilogx(
[Wcp, Wcp], [math.radians(phase_limit) +
math.radians(pm),
math.radians(phase_limit)],
color='k', zorder=-20)
# Annotate the gain margin (if it exists)
if gm != float('inf') and Wcg != float('nan'):
if dB:
ax_mag.semilogx(
[Wcg, Wcg], [-20.*np.log10(gm), -1e5],
color='k', linestyle=':', zorder=-20)
ax_mag.semilogx(
[Wcg, Wcg], [0, -20*np.log10(gm)],
color='k', zorder=-20)
else:
ax_mag.loglog(
[Wcg, Wcg], [1./gm, 1e-8], color='k',
linestyle=':', zorder=-20)
ax_mag.loglog(
[Wcg, Wcg], [1., 1./gm], color='k', zorder=-20)
if deg:
ax_phase.semilogx(
[Wcg, Wcg], [0, phase_limit],
color='k', linestyle=':', zorder=-20)
else:
ax_phase.semilogx(
[Wcg, Wcg], [0, math.radians(phase_limit)],
color='k', linestyle=':', zorder=-20)
ax_mag.set_ylim(mag_ylim)
ax_phase.set_ylim(phase_ylim)
if sisotool:
ax_mag.text(
0.04, 0.06,
'G.M.: %.2f %s\nFreq: %.2f %s' %
(20*np.log10(gm) if dB else gm,
'dB ' if dB else '',
Wcg, 'Hz' if Hz else 'rad/s'),
horizontalalignment='left',
verticalalignment='bottom',
transform=ax_mag.transAxes,
fontsize=8 if int(mpl.__version__[0]) == 1 else 6)
ax_phase.text(
0.04, 0.06,
'P.M.: %.2f %s\nFreq: %.2f %s' %
(pm if deg else math.radians(pm),
'deg' if deg else 'rad',
Wcp, 'Hz' if Hz else 'rad/s'),
horizontalalignment='left',
verticalalignment='bottom',
transform=ax_phase.transAxes,
fontsize=8 if int(mpl.__version__[0]) == 1 else 6)
else:
plt.suptitle(
"Gm = %.2f %s(at %.2f %s), "
"Pm = %.2f %s (at %.2f %s)" %
(20*np.log10(gm) if dB else gm,
'dB ' if dB else '',
Wcg, 'Hz' if Hz else 'rad/s',
pm if deg else math.radians(pm),
'deg' if deg else 'rad',
Wcp, 'Hz' if Hz else 'rad/s'))
# Add a grid to the plot + labeling
ax_phase.set_ylabel("Phase (deg)" if deg else "Phase (rad)")
def gen_zero_centered_series(val_min, val_max, period):
v1 = np.ceil(val_min / period - 0.2)
v2 = np.floor(val_max / period + 0.2)
return np.arange(v1, v2 + 1) * period
if deg:
ylim = ax_phase.get_ylim()
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], 45.))
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], 15.), minor=True)
else:
ylim = ax_phase.get_ylim()
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], math.pi / 4.))
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], math.pi / 12.), minor=True)
ax_phase.grid(grid and not margins, which='both')
# ax_mag.grid(which='minor', alpha=0.3)
# ax_mag.grid(which='major', alpha=0.9)
# ax_phase.grid(which='minor', alpha=0.3)
# ax_phase.grid(which='major', alpha=0.9)
# Label the frequency axis
ax_phase.set_xlabel("Frequency (Hz)" if Hz
else "Frequency (rad/sec)")
if len(syslist) == 1:
return mags[0], phases[0], omegas[0]
else:
return mags, phases, omegas
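# A minimal usage sketch for the return values of bode_plot above (hedged and
# not part of the original module: it assumes the module-level
# TransferFunction class and the same `plot` keyword used by nyquist_plot
# below).
def _bode_plot_example():
    sys = TransferFunction([1], [1, 2, 1])          # 1 / (s^2 + 2 s + 1)
    mag, phase, omega = bode_plot(sys, plot=False)
    # mag is returned on a linear scale and phase in radians, regardless of
    # the dB/deg plotting options used above.
    return 20 * np.log10(mag), phase * 180. / math.pi, omega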
#
# Nyquist plot
#
# Default values for module parameter variables
_nyquist_defaults = {
'nyquist.mirror_style': '--',
'nyquist.arrows': 2,
'nyquist.arrow_size': 8,
'nyquist.indent_radius': 1e-1,
'nyquist.indent_direction': 'right',
}
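# Hedged note (not part of the original module): these defaults can be
# overridden per call (e.g. ``nyquist_plot(sys, arrows=4)``) or globally via
# the ``config.defaults`` dictionary referenced in the docstring below, e.g.
#
#     config.defaults['nyquist.arrow_size'] = 12
#     config.defaults['nyquist.indent_direction'] = 'left'
#
# The per-call keyword wins because config._get_param pops it from kwargs
# before falling back to these module defaults.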
def nyquist_plot(syslist, omega=None, plot=True, omega_limits=None,
omega_num=None, label_freq=0, color=None,
return_contour=False, warn_nyquist=True, *args, **kwargs):
"""Nyquist plot for a system
Plots a Nyquist plot for the system over a (optional) frequency range.
    The curve is computed by evaluating the Nyquist segment along the positive
imaginary axis, with a mirror image generated to reflect the negative
imaginary axis. Poles on or near the imaginary axis are avoided using a
small indentation. The portion of the Nyquist contour at infinity is not
explicitly computed (since it maps to a constant value for any system with
a proper transfer function).
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK). Nyquist
curves for each system are plotted on the same graph.
plot : boolean
        If True, plot the Nyquist curve.
omega : array_like
Set of frequencies to be evaluated, in rad/sec.
omega_limits : array_like of two values
Limits to the range of frequencies. Ignored if omega is provided, and
auto-generated if omitted.
omega_num : int
Number of frequency samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
color : string
Used to specify the color of the line and arrowhead.
mirror_style : string or False
Linestyle for mirror image of the Nyquist curve. If `False` then
omit completely. Default linestyle ('--') is determined by
config.defaults['nyquist.mirror_style'].
return_contour : bool
If 'True', return the contour used to evaluate the Nyquist plot.
label_freq : int
Label every nth frequency on the plot. If not specified, no labels
are generated.
arrows : int or 1D/2D array of floats
Specify the number of arrows to plot on the Nyquist curve. If an
        integer is passed, that number of equally spaced arrows will be
plotted on each of the primary segment and the mirror image. If a 1D
array is passed, it should consist of a sorted list of floats between
0 and 1, indicating the location along the curve to plot an arrow. If
a 2D array is passed, the first row will be used to specify arrow
locations for the primary curve and the second row will be used for
the mirror image.
arrow_size : float
Arrowhead width and length (in display coordinates). Default value is
8 and can be set using config.defaults['nyquist.arrow_size'].
arrow_style : matplotlib.patches.ArrowStyle
Define style used for Nyquist curve arrows (overrides `arrow_size`).
indent_radius : float
Amount to indent the Nyquist contour around poles that are at or near
the imaginary axis.
indent_direction : str
For poles on the imaginary axis, set the direction of indentation to
be 'right' (default), 'left', or 'none'.
warn_nyquist : bool, optional
If set to 'False', turn off warnings about frequencies above Nyquist.
*args : :func:`matplotlib.pyplot.plot` positional properties, optional
Additional arguments for `matplotlib` plots (color, linestyle, etc)
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
count : int (or list of int if len(syslist) > 1)
Number of encirclements of the point -1 by the Nyquist curve. If
multiple systems are given, an array of counts is returned.
contour : ndarray (or list of ndarray if len(syslist) > 1)), optional
The contour used to create the primary Nyquist curve segment. To
obtain the Nyquist curve values, evaluate system(s) along contour.
Notes
-----
1. If a discrete time model is given, the frequency response is computed
along the upper branch of the unit circle, using the mapping ``z =
exp(1j * omega * dt)`` where `omega` ranges from 0 to `pi/dt` and `dt`
       is the discrete timebase. If the timebase is not specified
       (``dt=True``), `dt` is set to 1.
2. If a continuous-time system contains poles on or near the imaginary
axis, a small indentation will be used to avoid the pole. The radius
       of the indentation is given by `indent_radius` and it is taken to the
       right of stable poles and to the left of unstable poles. If a pole is
exactly on the imaginary axis, the `indent_direction` parameter can be
used to set the direction of indentation. Setting `indent_direction`
to `none` will turn off indentation. If `return_contour` is True, the
exact contour used for evaluation is returned.
Examples
--------
>>> sys = ss([[1, -2], [3, -4]], [[5], [7]], [[6, 8]], [[9]])
>>> count = nyquist_plot(sys)
"""
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
warnings.warn("'Plot' keyword is deprecated in nyquist_plot; "
"use 'plot'", FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Check to see if legacy 'labelFreq' keyword was used
if 'labelFreq' in kwargs:
warnings.warn("'labelFreq' keyword is deprecated in nyquist_plot; "
"use 'label_freq'", FutureWarning)
# Map 'labelFreq' keyword to 'label_freq' keyword
label_freq = kwargs.pop('labelFreq')
# Check to see if legacy 'arrow_width' or 'arrow_length' were used
if 'arrow_width' in kwargs or 'arrow_length' in kwargs:
warnings.warn(
"'arrow_width' and 'arrow_length' keywords are deprecated in "
"nyquist_plot; use `arrow_size` instead", FutureWarning)
kwargs['arrow_size'] = \
(kwargs.get('arrow_width', 0) + kwargs.get('arrow_length', 0)) / 2
kwargs.pop('arrow_width', False)
kwargs.pop('arrow_length', False)
# Get values for params (and pop from list to allow keyword use in plot)
omega_num = config._get_param('freqplot', 'number_of_samples', omega_num)
mirror_style = config._get_param(
'nyquist', 'mirror_style', kwargs, _nyquist_defaults, pop=True)
arrows = config._get_param(
'nyquist', 'arrows', kwargs, _nyquist_defaults, pop=True)
arrow_size = config._get_param(
'nyquist', 'arrow_size', kwargs, _nyquist_defaults, pop=True)
arrow_style = config._get_param('nyquist', 'arrow_style', kwargs, None)
indent_radius = config._get_param(
'nyquist', 'indent_radius', kwargs, _nyquist_defaults, pop=True)
indent_direction = config._get_param(
'nyquist', 'indent_direction', kwargs, _nyquist_defaults, pop=True)
# If argument was a singleton, turn it into a list
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
# Decide whether to go above Nyquist frequency
    omega_range_given = omega is not None
# Figure out the frequency limits
if omega is None:
if omega_limits is None:
# Select a default range if none is provided
omega = _default_frequency_range(
syslist, number_of_samples=omega_num)
# Replace first point with the origin
omega[0] = 0
else:
omega_range_given = True
omega_limits = np.asarray(omega_limits)
if len(omega_limits) != 2:
raise ValueError("len(omega_limits) must be 2")
omega = np.logspace(np.log10(omega_limits[0]),
np.log10(omega_limits[1]), num=omega_num,
endpoint=True)
# Go through each system and keep track of the results
counts, contours = [], []
for sys in syslist:
if not sys.issiso():
# TODO: Add MIMO nyquist plots.
raise ControlMIMONotImplemented(
"Nyquist plot currently only supports SISO systems.")
# Figure out the frequency range
omega_sys = np.asarray(omega)
# Determine the contour used to evaluate the Nyquist curve
if sys.isdtime(strict=True):
            # Transform frequencies for discrete-time systems
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
# Issue a warning if we are sampling above Nyquist
if np.any(omega_sys * sys.dt > np.pi) and warn_nyquist:
warnings.warn("evaluation above Nyquist frequency")
# Transform frequencies to continuous domain
            contour = np.exp(1j * omega_sys * sys.dt)
else:
contour = 1j * omega_sys
# Bend the contour around any poles on/near the imaginary axis
if isinstance(sys, (StateSpace, TransferFunction)) and \
sys.isctime() and indent_direction != 'none':
poles = sys.pole()
for i, s in enumerate(contour):
# Find the nearest pole
p = poles[(np.abs(poles - s)).argmin()]
# See if we need to indent around it
if abs(s - p) < indent_radius:
if p.real < 0 or \
(p.real == 0 and indent_direction == 'right'):
# Indent to the right
contour[i] += \
np.sqrt(indent_radius ** 2 - (s-p).imag ** 2)
elif p.real > 0 or \
(p.real == 0 and indent_direction == 'left'):
# Indent to the left
contour[i] -= \
np.sqrt(indent_radius ** 2 - (s-p).imag ** 2)
else:
                        raise ValueError(
                            "unknown value for indent_direction")
# TODO: add code to indent around discrete poles on unit circle
# Compute the primary curve
resp = sys(contour)
# Compute CW encirclements of -1 by integrating the (unwrapped) angle
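        # Only the positive-frequency half of the contour is evaluated here;
        # the mirror image contributes an equal phase change, so the winding
        # number is (total phase change) / pi rather than / (2 * pi).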
phase = -unwrap(np.angle(resp + 1))
count = int(np.round(np.sum(np.diff(phase)) / np.pi, 0))
counts.append(count)
contours.append(contour)
if plot:
# Parse the arrows keyword
if isinstance(arrows, int):
N = arrows
# Space arrows out, starting midway along each "region"
arrow_pos = np.linspace(0.5/N, 1 + 0.5/N, N, endpoint=False)
elif isinstance(arrows, (list, np.ndarray)):
arrow_pos = np.sort(np.atleast_1d(arrows))
elif not arrows:
arrow_pos = []
else:
raise ValueError("unknown or unsupported arrow location")
# Set the arrow style
if arrow_style is None:
arrow_style = mpl.patches.ArrowStyle(
'simple', head_width=arrow_size, head_length=arrow_size)
# Save the components of the response
x, y = resp.real, resp.imag
# Plot the primary curve
p = plt.plot(x, y, '-', color=color, *args, **kwargs)
c = p[0].get_color()
ax = plt.gca()
_add_arrows_to_line2D(
ax, p[0], arrow_pos, arrowstyle=arrow_style, dir=1)
# Plot the mirror image
if mirror_style is not False:
p = plt.plot(x, -y, mirror_style, color=c, *args, **kwargs)
_add_arrows_to_line2D(
ax, p[0], arrow_pos, arrowstyle=arrow_style, dir=-1)
# Mark the -1 point
plt.plot([-1], [0], 'r+')
# Label the frequencies of the points
if label_freq:
ind = slice(None, None, label_freq)
for xpt, ypt, omegapt in zip(x[ind], y[ind], omega_sys[ind]):
# Convert to Hz
f = omegapt / (2 * np.pi)
# Factor out multiples of 1000 and limit the
# result to the range [-8, 8].
pow1000 = max(min(get_pow1000(f), 8), -8)
# Get the SI prefix.
prefix = gen_prefix(pow1000)
# Apply the text. (Use a space before the text to
# prevent overlap with the data.)
#
# np.round() is used because 0.99... appears
# instead of 1.0, and this would otherwise be
# truncated to 0.
plt.text(xpt, ypt, ' ' +
str(int(np.round(f / 1000 ** pow1000, 0))) + ' ' +
prefix + 'Hz')
if plot:
ax = plt.gca()
ax.set_xlabel("Real axis")
ax.set_ylabel("Imaginary axis")
ax.grid(color="lightgray")
# "Squeeze" the results
if len(syslist) == 1:
counts, contours = counts[0], contours[0]
# Return counts and (optionally) the contour we used
return (counts, contours) if return_contour else counts
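# Hedged usage sketch for nyquist_plot (not part of the original module;
# assumes the module-level TransferFunction class). The returned count N of
# clockwise encirclements of -1, together with the number of open-loop
# right-half-plane poles P, gives the closed-loop right-half-plane pole count
# via the Nyquist criterion Z = N + P.
def _nyquist_plot_example():
    L = TransferFunction([5], [1, 2, 1])        # open-loop transfer function
    count = nyquist_plot(L, plot=False)
    P = np.sum(np.real(L.pole()) > 0)           # open-loop unstable poles
    return count + P == 0                       # True if closed loop is stable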
# Internal function to add arrows to a curve
def _add_arrows_to_line2D(
axes, line, arrow_locs=[0.2, 0.4, 0.6, 0.8],
arrowstyle='-|>', arrowsize=1, dir=1, transform=None):
"""
Add arrows to a matplotlib.lines.Line2D at selected locations.
Parameters:
-----------
axes: Axes object as returned by axes command (or gca)
line: Line2D object as returned by plot command
arrow_locs: list of locations where to insert arrows, % of total length
arrowstyle: style of the arrow
arrowsize: size of the arrow
transform: a matplotlib transform instance, default to data coordinates
Returns:
--------
arrows: list of arrows
Based on https://stackoverflow.com/questions/26911898/
"""
if not isinstance(line, mpl.lines.Line2D):
raise ValueError("expected a matplotlib.lines.Line2D object")
x, y = line.get_xdata(), line.get_ydata()
arrow_kw = {
"arrowstyle": arrowstyle,
}
color = line.get_color()
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
raise NotImplementedError("multicolor lines not supported")
else:
arrow_kw['color'] = color
linewidth = line.get_linewidth()
if isinstance(linewidth, np.ndarray):
raise NotImplementedError("multiwidth lines not supported")
else:
arrow_kw['linewidth'] = linewidth
if transform is None:
transform = axes.transData
# Compute the arc length along the curve
s = np.cumsum(np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2))
arrows = []
for loc in arrow_locs:
n = np.searchsorted(s, s[-1] * loc)
# Figure out what direction to paint the arrow
if dir == 1:
arrow_tail = (x[n], y[n])
arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))
elif dir == -1:
# Orient the arrow in the other direction on the segment
arrow_tail = (x[n + 1], y[n + 1])
arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))
else:
raise ValueError("unknown value for keyword 'dir'")
p = mpl.patches.FancyArrowPatch(
arrow_tail, arrow_head, transform=transform, lw=0,
**arrow_kw)
axes.add_patch(p)
arrows.append(p)
return arrows
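# Hedged sketch (not part of the original module) of how the helper above is
# used: arrows are attached to an already-plotted Line2D, and ``dir=-1``
# flips their orientation, as done for the mirror image in nyquist_plot.
def _add_arrows_example():
    theta = np.linspace(0, 2 * np.pi, 100)
    line, = plt.plot(np.cos(theta), np.sin(theta), '-')
    style = mpl.patches.ArrowStyle('simple', head_width=6, head_length=6)
    return _add_arrows_to_line2D(plt.gca(), line, arrow_locs=[0.25, 0.75],
                                 arrowstyle=style)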
#
# Gang of Four plot
#
# TODO: think about how (and whether) to handle lists of systems
def gangof4_plot(P, C, omega=None, **kwargs):
"""Plot the "Gang of 4" transfer functions for a system
Generates a 2x2 plot showing the "Gang of 4" sensitivity functions
[T, PS; CS, S]
Parameters
----------
P, C : LTI
Linear input/output systems (process and control)
omega : array
Range of frequencies (list or bounds) in rad/sec
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
None
"""
if not P.issiso() or not C.issiso():
# TODO: Add MIMO go4 plots.
raise ControlMIMONotImplemented(
"Gang of four is currently only implemented for SISO systems.")
# Get the default parameter values
dB = config._get_param('bode', 'dB', kwargs, _bode_defaults, pop=True)
Hz = config._get_param('bode', 'Hz', kwargs, _bode_defaults, pop=True)
grid = config._get_param('bode', 'grid', kwargs, _bode_defaults, pop=True)
    # Compute the sensitivity functions
L = P * C
S = feedback(1, L)
T = L * S
# Select a default range if none is provided
# TODO: This needs to be made more intelligent
if omega is None:
omega = _default_frequency_range((P, C, S))
# Set up the axes with labels so that multiple calls to
# gangof4_plot will superimpose the data. See details in bode_plot.
plot_axes = {'t': None, 's': None, 'ps': None, 'cs': None}
for ax in plt.gcf().axes:
label = ax.get_label()
if label.startswith('control-gangof4-'):
key = label[len('control-gangof4-'):]
if key not in plot_axes:
raise RuntimeError(
"unknown gangof4 axis type '{}'".format(label))
plot_axes[key] = ax
# if any of the axes are missing, start from scratch
if any((ax is None for ax in plot_axes.values())):
plt.clf()
plot_axes = {'s': plt.subplot(221, label='control-gangof4-s'),
'ps': plt.subplot(222, label='control-gangof4-ps'),
'cs': plt.subplot(223, label='control-gangof4-cs'),
't': plt.subplot(224, label='control-gangof4-t')}
#
# Plot the four sensitivity functions
#
omega_plot = omega / (2. * math.pi) if Hz else omega
# TODO: Need to add in the mag = 1 lines
mag_tmp, phase_tmp, omega = S.frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['s'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['s'].loglog(omega_plot, mag, **kwargs)
    plot_axes['s'].set_ylabel("$|S|$" + (" (dB)" if dB else ""))
plot_axes['s'].tick_params(labelbottom=False)
plot_axes['s'].grid(grid, which='both')
mag_tmp, phase_tmp, omega = (P * S).frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['ps'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['ps'].loglog(omega_plot, mag, **kwargs)
plot_axes['ps'].tick_params(labelbottom=False)
    plot_axes['ps'].set_ylabel("$|PS|$" + (" (dB)" if dB else ""))
plot_axes['ps'].grid(grid, which='both')
mag_tmp, phase_tmp, omega = (C * S).frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['cs'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['cs'].loglog(omega_plot, mag, **kwargs)
plot_axes['cs'].set_xlabel(
"Frequency (Hz)" if Hz else "Frequency (rad/sec)")
    plot_axes['cs'].set_ylabel("$|CS|$" + (" (dB)" if dB else ""))
plot_axes['cs'].grid(grid, which='both')
mag_tmp, phase_tmp, omega = T.frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['t'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['t'].loglog(omega_plot, mag, **kwargs)
plot_axes['t'].set_xlabel(
"Frequency (Hz)" if Hz else "Frequency (rad/sec)")
    plot_axes['t'].set_ylabel("$|T|$" + (" (dB)" if dB else ""))
plot_axes['t'].grid(grid, which='both')
plt.tight_layout()
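# Hedged usage sketch (not part of the original module; assumes the
# module-level TransferFunction class). Repeated calls draw onto the same
# four panels because the axes are looked up by their 'control-gangof4-*'
# labels above.
def _gangof4_example():
    P = TransferFunction([1], [1, 1, 0])        # plant 1 / (s^2 + s)
    C = TransferFunction([10, 5], [1, 0])       # PI controller (10 s + 5) / s
    gangof4_plot(P, C)
    gangof4_plot(P, 2 * C)                      # overlaid on the same axes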
#
# Utility functions
#
# This section of the code contains some utility functions for
# generating frequency domain plots
#
# Compute reasonable defaults for axes
def _default_frequency_range(syslist, Hz=None, number_of_samples=None,
feature_periphery_decades=None):
"""Compute a reasonable default frequency range for frequency
domain plots.
Finds a reasonable default frequency range by examining the features
(poles and zeros) of the systems in syslist.
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK)
Hz : bool
If True, the limits (first and last value) of the frequencies
are set to full decades in Hz so it fits plotting with logarithmic
scale in Hz otherwise in rad/s. Omega is always returned in rad/sec.
number_of_samples : int, optional
Number of samples to generate. The default value is read from
        ``config.defaults['freqplot.number_of_samples']``. If None, then the
default from `numpy.logspace` is used.
feature_periphery_decades : float, optional
Defines how many decades shall be included in the frequency range on
both sides of features (poles, zeros). The default value is read from
``config.defaults['freqplot.feature_periphery_decades']``.
Returns
-------
omega : array
Range of frequencies in rad/sec
Examples
--------
>>> from matlab import ss
>>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
>>> omega = _default_frequency_range(sys)
"""
# This code looks at the poles and zeros of all of the systems that
# we are plotting and sets the frequency range to be one decade above
# and below the min and max feature frequencies, rounded to the nearest
# integer. It excludes poles and zeros at the origin. If no features
    # are found, it returns logspace(-1, 1).
# Set default values for options
number_of_samples = config._get_param(
'freqplot', 'number_of_samples', number_of_samples)
feature_periphery_decades = config._get_param(
'freqplot', 'feature_periphery_decades', feature_periphery_decades, 1)
# Find the list of all poles and zeros in the systems
features = np.array(())
freq_interesting = []
# detect if single sys passed by checking if it is sequence-like
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
for sys in syslist:
try:
# Add new features to the list
if sys.isctime():
features_ = np.concatenate((np.abs(sys.pole()),
np.abs(sys.zero())))
# Get rid of poles and zeros at the origin
features_ = features_[features_ != 0.0]
features = np.concatenate((features, features_))
elif sys.isdtime(strict=True):
fn = math.pi * 1. / sys.dt
# TODO: What distance to the Nyquist frequency is appropriate?
freq_interesting.append(fn * 0.9)
features_ = np.concatenate((sys.pole(),
sys.zero()))
                # Get rid of poles and zeros
                # * at the origin and with real <= 0 & imag == 0 (log fails)
                # * at 1. (would result in omega = 0 on a logarithmic plot)
features_ = features_[
(features_.imag != 0.0) | (features_.real > 0.)]
features_ = features_[
np.bitwise_not((features_.imag == 0.0) &
(np.abs(features_.real - 1.0) < 1.e-10))]
# TODO: improve
features__ = np.abs(np.log(features_) / (1.j * sys.dt))
features = np.concatenate((features, features__))
else:
# TODO
                raise NotImplementedError(
                    "this type of system is not implemented yet")
except NotImplementedError:
pass
# Make sure there is at least one point in the range
if features.shape[0] == 0:
features = np.array([1.])
if Hz:
features /= 2. * math.pi
features = np.log10(features)
lsp_min = np.floor(np.min(features) - feature_periphery_decades)
lsp_max = np.ceil(np.max(features) + feature_periphery_decades)
lsp_min += np.log10(2. * math.pi)
lsp_max += np.log10(2. * math.pi)
else:
features = np.log10(features)
lsp_min = np.floor(np.min(features) - feature_periphery_decades)
lsp_max = np.ceil(np.max(features) + feature_periphery_decades)
if freq_interesting:
lsp_min = min(lsp_min, np.log10(min(freq_interesting)))
lsp_max = max(lsp_max, np.log10(max(freq_interesting)))
# TODO: Add a check in discrete case to make sure we don't get aliasing
# (Attention: there is a list of system but only one omega vector)
# Set the range to be an order of magnitude beyond any features
if number_of_samples:
omega = np.logspace(
lsp_min, lsp_max, num=number_of_samples, endpoint=True)
else:
omega = np.logspace(lsp_min, lsp_max, endpoint=True)
return omega
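# Hedged numeric illustration of the decade rounding above (not part of the
# original module): a continuous-time system with features at 0.5 and 20
# rad/s and feature_periphery_decades=1 has log10 features of roughly -0.3
# and 1.3, so the grid spans floor(-0.3 - 1) = -2 to ceil(1.3 + 1) = 3,
# i.e. np.logspace(-2, 3, num=number_of_samples).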
#
# Utility functions to create nice looking labels (KLD 5/23/11)
#
def get_pow1000(num):
"""Determine exponent for which significand of a number is within the
range [1, 1000).
"""
# Based on algorithm from http://www.mail-archive.com/
# [email protected]/msg14433.html, accessed 2010/11/7
# by Jason Heeris 2009/11/18
from decimal import Decimal
from math import floor
dnum = Decimal(str(num))
if dnum == 0:
return 0
elif dnum < 0:
dnum = -dnum
return int(floor(dnum.log10() / 3))
def gen_prefix(pow1000):
"""Return the SI prefix for a power of 1000.
"""
# Prefixes according to Table 5 of [BIPM 2006] (excluding hecto,
# deca, deci, and centi).
if pow1000 < -8 or pow1000 > 8:
raise ValueError(
"Value is out of the range covered by the SI prefixes.")
return ['Y', # yotta (10^24)
'Z', # zetta (10^21)
'E', # exa (10^18)
'P', # peta (10^15)
'T', # tera (10^12)
'G', # giga (10^9)
'M', # mega (10^6)
'k', # kilo (10^3)
'', # (10^0)
'm', # milli (10^-3)
r'$\mu$', # micro (10^-6)
'n', # nano (10^-9)
'p', # pico (10^-12)
'f', # femto (10^-15)
'a', # atto (10^-18)
'z', # zepto (10^-21)
'y'][8 - pow1000] # yocto (10^-24)
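# Hedged examples for the two helpers above (not part of the original module):
#   get_pow1000(0.00257) -> -1   (0.00257 == 2.57 * 1000**-1)
#   get_pow1000(1.5e7)   ->  2   (1.5e7   == 15   * 1000**2)
#   gen_prefix(-1)       -> 'm'
#   gen_prefix(2)        -> 'M'
# so a frequency f is labeled as f / 1000**get_pow1000(f) followed by the
# corresponding SI prefix, as done in nyquist_plot above.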
def find_nearest_omega(omega_list, omega):
omega_list = np.asarray(omega_list)
return omega_list[(np.abs(omega_list - omega)).argmin()]
# Function aliases
bode = bode_plot
nyquist = nyquist_plot
gangof4 = gangof4_plot
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 42 | 20925 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
    # The principle of LARS is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
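            # `ocur` counts how many covariances are (numerically) tied at the
            # current maximum; LARS adds one active variable per step, so step
            # i is expected to have i + 1 tied covariances until the active
            # set is full.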
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with rank < n_features) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
    # Note: it used to be the case that Lars had to use the 'drop for good'
    # strategy for this, but this is no longer the case with the
    # equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
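    # This is the standard Lasso objective,
    # (1 / (2 * n_samples)) * ||y - X w||_2^2 + alpha * ||w||_1,
    # evaluated for both solvers below.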
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
    # ensure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = assert_warns(DeprecationWarning, estimator.decision_function, X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
    # now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
    # test that the positive option is correctly passed through to all
    # estimator classes, all within this same function
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
    # This test is basically a copy of the one above, with the additional
    # positive option. However, for the middle part (the comparison of
    # coefficient values over a range of alphas) an adaptation was needed;
    # see below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
CG-F16-4-Rutgers/steersuite-rutgers | steerstats/OptimizeAlgorithm.py | 8 | 34052 | from SteerStats import SteerStats
from steersuite.SteerStatsOptions import getOptions
from tools.cma.cma import fmin
from tools.cma.cma import fcts
import math
from steersuite.OptimizationExperiment import OptimizationExperiment
import sys
from optimization import *
import random
import array
import numpy
import copy
from math import sqrt
from tools.deap import algorithms
from tools.deap import base
from tools.deap import benchmarks
from tools.deap.benchmarks.tools import diversity, convergence
from tools.deap import creator
from tools.deap import tools
from util import saveMatrixToCVS
from util import saveMatrixToCVSDict
from util import readCSVToMutliObjData
from util import readCSVDictToMutliObjData
from multiprocessing import Semaphore
# for post processing
from tools.SteerStatsPostProcessEvent import PostProcessEvent_TarData
from tools.SteerStatsPostProcessEvent import PostProcessEvent_DumpOptResults
from tools.SteerStatsPostProcessEvent import PostProcessEvent_CreateHeatMap
from tools.SteerStatsPostProcessEvent import PostProcessEvent_MakeVideo
# import multiprocessing
# from pathos.multiprocessing import ProcessingPool as Pool
# from tools.plotting.PlotParameterData import parameter
# clear; clear; time python OptimizeAlgorithm.py --ai pprAI --checkAgentInteraction --numScenarios 200 --benchmark compositeGraph -c --scenarioSetInitId=0 --scenarioDIR data/scenarios/representativeSet --paramFile xml/config/ppr-param-config.xml --statsOnly -p 2
# clear; clear; time nice -4 python OptimizeAlgorithm.py --ai rvo2dAI --checkAgentInteraction --numScenarios 5000 -c --scenarioSetInitId=0 --scenarioDIR data/scenarios/representativeSet --paramFile xml/config/rvo2d-param-config.xml --statsOnly -p 16
# clear; clear; time nice -4 python OptimizeAlgorithm.py --ai pprAI --checkAgentInteraction --numScenarios 50 -c --scenarioSetInitId=0 --scenarioDIR data/scenarios/representativeSet --paramFile xml/config/ppr_optimized_configs/param_config_ppr_combination.xml --statsOnly -p 2 --cma data
# clear; clear; time nice -4 python OptimizeAlgorithm.py --ai rvo2dAI --checkAgentInteraction --numScenarios 500 -c --scenarioSetInitId=0 --scenarioDIR data/scenarios/representativeSet_Intersections/ --paramFile xml/config/rvo2d-param-config.xml --statsOnly -p 2 --cma data --benchmark compositePLE
# clear; clear; time nice -4 python OptimizeAlgorithm.py --ai pprAI --checkAgentInteraction --numScenarios 5000 -c --scenarioSetInitId=0 --scenarioDIR data/scenarios/representativeSet_Intersections/ --paramFile xml/config/ppr-param-config.xml --statsOnly -p 10 --cma data/optimization/ppr_over_PLE/ --benchmark compositePLE --dataDir /Volumes/Block0/SteerFit/PPR/PLE/
# clear; clear; time nice -4 python OptimizeAlgorithm.py --ai footstepAI --checkAgentInteraction --numScenarios 200 -c --scenarioSetInitId=0 --scenarioDIR data/scenarios/representativeSet_Intersections/ --paramFile xml/config/footstep-param-config.xml --statsOnly -p 2 --benchmark compositePLE
# Optimize a subspace
# clear; clear; time nice -18 python OptimizeAlgorithm.py --ai sfAI --numScenarios 10 --benchmark compositePLE --statsOnly --scenarioSetInitId 0 --scenarioDIR data/scenarios/subspace/ --subspace=../subspaces/hallway-one-way.xml --optimizer=CMA-ES --paramFile=xml/config/subspaces/hallway-one-way-2pillar.xml --cmaDumpDir=data/ --numFrames=2000
# clear; clear; time nice -18 python OptimizeAlgorithm.py --ai sfAI --numScenarios 10 --benchmark compositePLE --statsOnly --scenarioSetInitId 0 --scenarioDIR data/scenarios/subspace/ --subspace=../subspaces/hallway-one-way.xml --optimizer=CMA-ES --paramFile=xml/config/subspaces/hallway-4-pillar.xml --cmaDumpDir=data/ --numFrames=2000
# To optimize
# clear; clear; time nice -18 python OptimizeAlgorithm.py --ai sfAI --numScenarios 1 --benchmark compositePLE --statsOnly --scenarioSetInitId 0 --scenarioDIR data/scenarios/subspace/ --subspace=../subspaces/hallway-one-way.xml --optimizer=CMA-ES --paramFile=xml/config/subspaces/hallway-4-pillar.xml --cmaDumpDir=data/ --numFrames=2000 -c
def nothing(*args):
return 0
class OptimizeAlgorithm(object):
def __init__(self, f=nothing, g=nothing, options=None):
import os
from datetime import datetime
self._result_list = []
self._experiements = []
self._penalty_func = []
self.set_metric_func(f)
self.set_penatly_funct(g)
self._evals=0
self._options=options
self.__eval_processes=1
self.__evalSem=Semaphore(self.__eval_processes)
if os.path.exists(self._options.dataPath) and (not self._options.noSimulation):
if self._options.dataPath[-1] == '/':
newDir = self._options.dataPath[:-1]+"_"+str(datetime.now().strftime('%Y-%m-%d-%M-%S.%f'))
else:
newDir = self._options.dataPath+"_"+str(datetime.now().strftime('%Y-%m-%d-%M-%S.%f'))
print "Moving old simulation data dir: " + str(self._options.dataPath) + " to " + newDir
os.rename( self._options.dataPath, newDir )
self._post_process_events=[]
# Used to normalize results (make it possible for functions to be converted to minimizations)
self._control_result=None
def set_metric_func(self, f):
self._metric_func=f
def set_penatly_funct(self, g):
        self._penalty_func.append(g)
def set_steerstats(self, steerStats):
self._steerStats=steerStats
def get_steerstats(self):
return self._steerStats
def add_post_process_event(self, event):
self._post_process_events.append(event)
def post_process(self):
for event in self._post_process_events:
event.process()
def get_options(self):
return self._options
def eval(self, parameters):
"""
Used to combine the metric and penalty functions. This also allows
the penalty function to be a function of the metric function.
"""
dirName="eval"
self.__evalSem.acquire()
options = copy.deepcopy(self._options)
options.dataPath = options.dataPath+dirName+str(self._evals)+"/"
print "Evals thus far: " + str(self._evals) + " datapath: "+ str(options.dataPath)
self._evals += 1
self.__evalSem.release()
f = self._metric_func(parameters,options=options)
# print "parameters in eval: " + str(parameters)
g = float(0)
for penalty in self._penalty_func:
g = g + penalty(parameters, f)
if self._control_result is not None:
print "Control result is: " + str(self._control_result)
print "original f is: " + str(f)
f = self._control_result - f
result = f + g
print "**** f: " + str(f) + " ***** g:" + str(g) + " **** result: " + str(result) + " params: " + str(parameters)
if options.logMetrics:
# Will this work properly? Will the instances be different?
# will it handle multi-threading??
data = [result, f, g]
data.extend(parameters)
print "Writing metric file: " + str(data)
self._steerStats.log(data)
self._steerStats.close_data_log()
return result
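    # Hedged usage sketch for eval() (not part of the original class): wire up
    # one metric and any penalty functions, then hand eval to an optimizer as
    # the objective, for example
    #
    #   op = OptimizeAlgorithm(f=steerStats.performanceMetric, options=options)
    #   op.set_penatly_funct(overlapPenalty)
    #   op.set_steerstats(steerStats)
    #   result = OptimizeParamsCMA(op.eval, ai_params_list,
    #                              stddev_for_parameters, ai_bounds,
    #                              availableProcesses, cmaFilePrefix)
    #
    # The names follow the ones used in OptimizeWithCMA() below; whether the
    # metric accepts an options= keyword (as eval() assumes) depends on the
    # SteerStats implementation.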
def evalMulti(self, parameters):
out = ()
results = self._steerStats.RunStats(parameters)
for metric in self._metric_func:
out = out + (metric(parameters, results),)
return out
def evalMultiWithEntropyLast(self, parameters):
out = ()
results = self._steerStats.RunStats(parameters)
for metric in self._metric_func[:-1]:
out = out + (metric(parameters, results),)
entropyOut = self._metric_func[2](parameters)
out = out + (entropyOut,)
return out
def log_result(self, result):
# This is called whenever foo_pool(i) returns a result.
# result_list is modified only by the main process, not the pool workers.
self._result_list.append(result)
def get_results(self):
return self._result_list
def set_experiements(self, exps):
self._experiements = exps
def get_experiements(self):
return self._experiements
def OptimizeParamsCMA(function_to_min, initial_param_values, std_value,
ai_bounds, CMAavailableProcesses,
cmaFilePrefix="", step_scaling=None,
maxfevals_=1000):
return fmin(function_to_min, initial_param_values, sigma0=std_value,
bounds=ai_bounds,
verb_filenameprefix=cmaFilePrefix+'outcma',
scaling_of_variables=step_scaling,
maxfevals=maxfevals_,
availableProceses=CMAavailableProcesses)
def OptimizeParamsCMAProfiling(function_to_min, initial_param_values, goal_value,
ai_bounds, cmaFilePrefix=""):
return fmin(function_to_min, initial_param_values, sigma0=goal_value,
bounds=ai_bounds,
verb_filenameprefix=cmaFilePrefix+'outcma',
maxiter=3)
def prepareOptimization(filename, options):
param_xml_file = open(filename)
param_config_data = param_xml_file.read()
OptimExper = OptimizationExperiment(param_config_data)
optimizingParameters = OptimExper.parseExperimentXML()
stddev_for_parameters=1
scaling_factor=float(0.3)
ai_params_list = []
ai_param_names_list = []
ai_lower_bounds = []
ai_upper_bounds = []
ai_step_scaling = []
for ai_param in optimizingParameters:
ai_params_list.append(ai_param.get_original())
ai_param_names_list.append(ai_param.get_name())
ai_lower_bounds.append(ai_param.get_min())
ai_upper_bounds.append(ai_param.get_max())
"""
    This is magic stuff here:
    scale the stddev for each parameter according to the min/max values of
    its search range; apparently this should be 0.3 of the search area if
    the search area is perfectly round.
"""
ai_step_scaling.append( (ai_param.get_max()-ai_param.get_min())*scaling_factor )
ai_bounds = [ai_lower_bounds, ai_upper_bounds]
return [ai_bounds, ai_params_list, ai_param_names_list,
stddev_for_parameters, ai_step_scaling]
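# Hedged illustration of the 0.3 scaling computed above (not part of the
# original module): a parameter bounded by [0.0, 10.0] gets a per-coordinate
# step scale of (10.0 - 0.0) * 0.3 = 3.0, and OptimizeParamsCMA() forwards
# these values to CMA-ES as scaling_of_variables.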
def multiOptimizeNSGA2(restart_=False):
"""
# for multi objective optimization
# clear; clear; time nice -4 python OptimizeAlgorithm.py --ai sfAI --checkAgentInteraction --numScenarios 1 --scenarioSetInitId=0 --scenarioDIR data/scenarios/representativeSet_Intersections/ --paramFile xml/config/sf-param-config-multi.xml --statsOnly -p 1 --benchmark compositePLE --optimizer="NSGA2" -c
# 3d multi opjective optimization
# clear; clear; time nice -4 optirun python OptimizeAlgorithm.py --ai sfAI --checkAgentInteraction --numScenarios 1 --scenarioSetInitId=0 --scenarioDIR data/scenarios/customSet/bottleneck-hallway/ --paramFile xml/config/sf-param-config-multi.xml --statsOnly -p 1 --benchmark compositePLE --optimizer="NSGA2" --numFrames 2500 --RealDataDir=data/RealWorldData/b140_combined/ -v -c
# to restart
# clear; clear; time nice -4 optirun python OptimizeAlgorithm.py --ai sfAI --checkAgentInteraction --numScenarios 1 --scenarioSetInitId=0 --scenarioDIR data/scenarios/customSet/bottleneck-hallway/ --paramFile xml/config/sf-param-config-multi.xml --statsOnly -p 6 --benchmark compositePLE --optimizer="NSGA2" --numFrames 2500 --RealDataDir=data/RealWorldData/b140_combined/ -c --multiObjRestartFile=SteerStatsOpt_1.log
# clear; clear; time nice -4 optirun python OptimizeAlgorithm.py --ai sfAI --checkAgentInteraction --numScenarios 1 --scenarioSetInitId=0 --scenarioDIR data/scenarios/customSet/bottleneck-hallway/ --paramFile xml/config/sf-param-config-multi.xml --statsOnly -p 6 --benchmark compositePLE --optimizer="NSGA2" --numFrames 2500 --RealDataDir=data/RealWorldData/b140_combined/ -c --multiObjRestartFile=SteerStatsOpt_2.log
"""
options_ = getOptions()
availableProcesses_=int(options_.processes)
options_.processes=1
[ai_bounds_, ai_params_list, ai_param_names_list,
     stddev_for_parameters, ai_step_scaling] = prepareOptimization(options_.paramFile, options_)
steerStats_ = SteerStats(options_)
steerStats_.setParamNameDict(ai_param_names_list)
cmaFilePrefix=options_.cmaFilePrefix
print "Algorithm param names: " + str(ai_param_names_list)
print ai_bounds_
print ai_params_list
NDIM_ = len(ai_param_names_list)
# f_ = (steerStats_.performanceMetric, steerStats_.pleMetricGlobal, steerStats_.entropyMetric)
f_ = (steerStats_.performanceMetric, steerStats_.pleMetricGlobal)
return multiOptimizeWithNSGA2(NDIM=NDIM_, NGEN=3, MU=8, f=f_,
options=options_, ai_bounds=ai_bounds_,
availableProcesses=availableProcesses_,
steerStats=steerStats_, restart=restart_)
def multiOptimizeWithNSGA2(options, availableProcesses, ai_bounds, steerStats,
restart, NDIM=5, NGEN=3, MU=6, f=()):
BOUND_LOW, BOUND_UP = 0, 10000.0
weights_ = (-1.0,)*len(f)
weightNames = ['o'+str(i) for i in range(len(weights_))]
creator.create("FitnessMin", base.Fitness, weights=weights_)
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)
toolbox = base.Toolbox()
op = OptimizeAlgorithm()
op.set_penatly_funct(overlapPenalty)
op.set_penatly_funct(agentNotFInishedPenalty)
op.set_steerstats(steerStats)
def checkBoundsPop(min, max):
def decorator(func):
def wrapper(*args, **kargs):
offspring = func(*args, **kargs)
for child in offspring:
for i in xrange(len(child)):
# print "************************ checking range"
if child[i] > max[i]:
child[i] = max[i]
elif child[i] < min[i]:
child[i] = min[i]
return offspring
return wrapper
return decorator
def checkBounds(min, max):
def decorator(func):
def wrapper(*args, **kargs):
offspring = func(*args, **kargs)
for i in xrange(len(offspring)):
# print offspring[i]
if offspring[i] > max[i]:
offspring[i] = max[i]
elif offspring[i] < min[i]:
offspring[i] = min[i]
return offspring
return wrapper
return decorator
def uniform(low, up, size=None):
return [random.uniform(a, b) for a, b in zip(low, up)]
if availableProcesses > 1:
# pool = multiprocessing.Pool(2)
# I will assume the user knows what they are doing here.
pool = Pool(availableProcesses)
toolbox.register("map", pool.map)
toolbox.register("attr_float", uniform, ai_bounds[0], ai_bounds[1], NDIM)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
op.set_metric_func(f)
toolbox.register("evaluate", op.evalMulti)
toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)
toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)
toolbox.register("select", tools.selNSGA2)
toolbox.decorate("mate", checkBoundsPop(ai_bounds[0], ai_bounds[1]))
toolbox.decorate("mutate", checkBoundsPop(ai_bounds[0], ai_bounds[1]))
CXPB = 0.9
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean, axis=0)
stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
weighties = []
weighties.extend(weightNames)
weighties.extend(steerStats.getParamNameDict())
if not restart:
pop = toolbox.population(n=MU)
print "population: "
print pop
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in pop if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
print "Post fitness"
print pop
print "ind"
print ind
print "invalid_ind"
print invalid_ind
print "fit"
print fit
print "fitnesses"
print fitnesses
else:
print "restarting from previous data"
pop = toolbox.population(n=MU)
dataFile = open(options.multiObjRestartFile, "r")
fitnesses, invalid_ind = readCSVDictToMutliObjData(dataFile, len(f), weighties)
dataFile.close()
for tmp_p, ind, fit in zip(invalid_ind, pop, fitnesses):
fit = tuple(fit)
print "fit: " + str(fit)
print "tmp_p: " + str(tmp_p)
print "ind " + str(ind)
ind.fitness.values = fit
for i in range(len(ind)):
ind[i] = float(tmp_p[i])
# This is just to assign the crowding distance to the individuals
# no actual selection is done
pop = toolbox.select(pop, len(pop))
record = stats.compile(pop)
logbook.record(gen=0, evals=len(invalid_ind), **record)
print(logbook.stream)
# Begin the generational process
for gen in range(1, NGEN):
# Vary the population
offspring = tools.selTournamentDCD(pop, len(pop))
offspring = [toolbox.clone(ind) for ind in offspring]
# print offspring
for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
if random.random() <= CXPB:
toolbox.mate(ind1, ind2)
toolbox.mutate(ind1)
toolbox.mutate(ind2)
del ind1.fitness.values, ind2.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Select the next generation population
pop = toolbox.select(pop + offspring, MU)
record = stats.compile(pop)
logbook.record(gen=gen, evals=len(invalid_ind), **record)
print(logbook.stream)
tmp_pop = copy.deepcopy(pop)
tmp_pop.sort(key=lambda x: x.fitness.values)
front = numpy.array([ind.fitness.values for ind in tmp_pop])
log_filename = (options.steeringAlgorithm + '_GEN_' + str(gen) +
'_POP_' + str(MU) + '.csv')
opt_log = open(log_filename, "w")
saveMatrixToCVSDict(numpy.append(front, pop, axis=1), opt_log, weighties)
opt_log.close()
return pop, logbook
def OptimizeWithCMA():
# clear; clear; time python SteerStats.py --ai pprAI --checkAgentInteraction --numScenarios 5000 --benchmark compositeGraph -c --scenarioSetInitId=0 --scenarioDIR data/scenarios/representativeSet --statsOnly -p 8
options = getOptions()
[ai_bounds, ai_params_defaults_list, ai_param_names_list,
stddev_for_parameters, ai_step_scaling] = prepareOptimization(options.paramFile, options)
steerStats = SteerStats(options)
steerStats.setParamNameDict(ai_param_names_list)
cmaFilePrefix=options.cmaFilePrefix
print "Algorithm param names: " + str(ai_param_names_list)
print ai_bounds
print ai_params_defaults_list
# sys.exit()
# print results
# Just coverage metric
#result = OptimizeParamsCMA(steerStats.coverageMetric, ai_params_list,
# stddev_for_parameters, ai_bounds,
# cmaFilePrefix)
# Just performance metric
#result = OptimizeParamsCMA(steerStats.performanceMetric, ai_params_list,
# stddev_for_parameters, ai_bounds, cmaFilePrefix)
#
# Just distance metric
# result = OptimizeParamsCMA(steerStats.distanceMetric, ai_params_list,
# stddev_for_parameters, ai_bounds, cmaFilePrefix)
# Coverage plus frames metric
#result = OptimizeParamsCMA(steerStats.coveragePlusFramesMetric, ai_params_list,
# stddev_for_parameters, ai_bounds, cmaFilePrefix)
# coverage plus distance metric
# result = OptimizeParamsCMA(steerStats.coveragePlusDistanceMetric, ai_params_list,
# stddev_for_parameters, ai_bounds, cmaFilePrefix)
# coverage + computational_time
#result = OptimizeParamsCMA(steerStats.coveragePlusPerformanceMetric,
# ai_params_list,
# stddev_for_parameters, ai_bounds, cmaFilePrefix)
# computation time + coverage + quality_distance
# result = OptimizeParamsCMA(steerStats.coveragePlusDistancePlusComputationMetric,
# ai_params_list,
# stddev_for_parameters, ai_bounds,
# cmaFilePrefix)
# computation time + coverage + quality_distance
op = OptimizeAlgorithm(options=options)
op.set_steerstats(steerStats)
ppe = PostProcessEvent_TarData(op)
pped = PostProcessEvent_DumpOptResults(op)
ppev = PostProcessEvent_MakeVideo(op)
# op.set_metric_func(steerStats.timeMetricGlobal)
if options.objectiveFunction != "":
metric = steerStats.getBoundObjective(options.objectiveFunction)
if metric is None:
print '***** objective function ' + str(options.objectiveFunction) + ' not found *******'
sys.exit(1)
print "objective Function: " + str(options.objectiveFunction) + ", found: " + str(metric)
op.set_metric_func(steerStats.getBoundObjective(options.objectiveFunction))
else:
op.set_metric_func(steerStats.agentFlowMetricGlobal)
print 'blah'
if options.penaltyFunction != "overlapPenalty":
penalty = steerStats.getBoundPenaltyFunc(options.penaltyFunction)
op.set_penatly_funct(penalty)
else:
op.set_penatly_funct(overlapPenalty)
# Does not actually place an obstacle in the scenario because it comes from options.subspaceParams
#control_result = steerStats.RunStats((0,0), options=options) # hard coded stuff uggh...
#control_result = op._metric_func((0,0),results=control_result ,options=options)
# control_result = steerStats.RunStats(ai_params_defaults_list, options=options)
# print "control result: " + str(control_result)
# print "op: " + str(op)
# control_result = op._metric_func(ai_params_defaults_list,results=control_result ,options=options)
# op._control_result = control_result
result = OptimizeParamsCMA(op.eval,
ai_params_defaults_list,
stddev_for_parameters, ai_bounds,
options.cmaProcesses,
cmaFilePrefix,
ai_step_scaling,
maxfevals_=options.cmaMaxEvals)
print "Control result: " + str(op._control_result)
opt_log = open(cmaFilePrefix+"SteerStatsOptResult.txt", "w")
writeCMAResults(opt_log, result)
opt_log.close()
# write all of the result to a file.
op.add_post_process_event(pped)
# create a heatmap from the results
# op.add_post_process_event(ppeh)
# make video
ppev.set_ai_params(result[0])
# Record a video of the result
#if options.config.getProcessingEvents()['PostProcessEvent_MakeVideo'] == 'true':
# op.add_post_process_event(ppev)
# this post processing step should be added last (compressing everything)
op.add_post_process_event(ppe)
op.post_process()
print "Done"
def writeCMAResults(file, result):
file.write("best evaluated set of parameters, X: " + str(result[0]) + '\n')
file.write('best evaluated f for parameters, X: ' + str(result[1]) + '\n')
file.write('evaluations till best X: ' + str(result[2]) + '\n')
file.write('geno of x: ' + str(result[3]) + '\n')
file.write('total evaluations: ' + str(result[4]) + '\n')
file.write('total iterations f: ' + str(result[5]) + '\n')
file.write('best mean solution for normal distribution: ' + str(result[6]) + '\n')
file.write('std deviation of parameters: ' + str(result[7]) + '\n')
file.write('reason for stopping optimization: ' + str(result[8]) + '\n')
def OptimizeWithCMA_ES_MixedInt():
print "blah"
options = getOptions()
cmaFilePrefix=options.cmaFilePrefix
result = "blah"
param_xml_file = open(options.paramFile)
param_config_data = param_xml_file.read()
OptimExper = OptimizationExperiment(param_config_data)
optimizingParameters = OptimExper.parseExperimentXML()
stddev_for_parameters=1
scaling_factor=float(0.13)
ai_params_list = []
ai_param_names_list = []
ai_lower_bounds = []
ai_upper_bounds = []
ai_step_scaling = []
for ai_param in optimizingParameters:
ai_params_list.append(ai_param.get_original())
ai_param_names_list.append(ai_param.get_name())
ai_lower_bounds.append(ai_param.get_min())
ai_upper_bounds.append(ai_param.get_max())
"""
This is magic stuff here
Scaling stddev for all the parameters according to the min/max values for the search
apparently this should be 0.3 of the search area if your search area is perfectly round
"""
ai_step_scaling.append( (ai_param.get_max()-ai_param.get_min())*scaling_factor )
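# Worked example of the scaling above (illustration only, not from the
# original code): a parameter bounded to [0.5, 4.5] spans a range of 4.0,
# so with scaling_factor = 0.13 its initial CMA-ES step size becomes
# 4.0 * 0.13 = 0.52.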
print "Algorithm param names: " + str(ai_param_names_list)
ai_bounds = [ai_lower_bounds, ai_upper_bounds]
steerStats = SteerStats(options)
steerStats.setParamNameDict(ai_param_names_list)
print ai_lower_bounds
print ai_params_list
print ai_upper_bounds
steerStats.pleMetric(ai_params_list, None)
def OptimizeWithMIDACO():
print "Not supported"
sys.exit(-1)
def OptimizeWithBruteForce():
# Only works for 2D for now
# clear; clear; time nice -18 python OptimizeAlgorithm.py --ai sfAI --checkAgentInteraction --numScenarios 1 --benchmark compositePLE --statsOnly --scenarioSetInitId 0 --subspace=../subspaces/icra-subspaces/hallway-one-way-100-agents-funnel.xml --dataDir=data/ --numFrames=2000 --optimizeWith=bruteForce --paramFile=xml/config/subspaces/icra-subspaces/hallway-one-way-1pillar-smallOptimizationRegion.xml -c -p 4 --logMetrics
import time # aint Python crazy like this
# from multiprocessing import Pool as Pool
from multiprocessing.pool import ThreadPool
import itertools
options = getOptions()
# options.noReturn=True
availableProcesses=int(options.processes)
options.processes=int(1)
steerStats = SteerStats(options)
# steerStats.set_insert_func(InsertStatistics.InsertOptimizationConfig)
[ai_bounds, ai_params_list, ai_param_names_list,
stddev_for_parameters, ai_step_scaling] = prepareOptimization(options.paramFile, options)
# steerStats = SteerStats(options)
steerStats.setParamNameDict(ai_param_names_list)
cmaFilePrefix=options.cmaFilePrefix
param_xml_file = open(options.paramFile)
param_config_data = param_xml_file.read()
default_params={}
for param_name,t_param in zip(ai_param_names_list,ai_params_list):
default_params[param_name]=t_param
print default_params
OptimExper = OptimizationExperiment(param_config_data)
optimizingParameters = OptimExper.parseExperimentXML()
op = OptimizeAlgorithm(options=options)
ppe = PostProcessEvent_TarData(op)
pped = PostProcessEvent_DumpOptResults(op)
ppeh = PostProcessEvent_CreateHeatMap(op)
# op._options=options
# op.set_metric_func(steerStats.timeMetricGlobal)
op.set_steerstats(steerStats)
# op.set_metric_func(steerStats.simulationTimeMetricGlobal)
if options.objectiveFunction != "":
op.set_metric_func(steerStats.getBoundObjective(options.objectiveFunction))
else:
# op.set_metric_func(steerStats.agentFlowMetricGlobal)
print 'blah'
op.set_penatly_funct(overlapPenalty)
# result = OptimizeParamsCMA(op.eval,
# ai_params_list,
# stddev_for_parameters, ai_bounds,
# options.cmaProcesses,
# cmaFilePrefix,
# ai_step_scaling)
#opt_log = open(cmaFilePrefix+"SteerStatsOptResult.txt", "w")
#writeCMAResults(opt_log, result)
#opt_log.close()
# print "Algorithm param names: " + str(ai_param_names_list)
# print optimizingParameters
experiements = []
experiements_param = []
aiParams = []
for param in optimizingParameters:
# this will create a lot of directories but I won't have to worry about
# synchronization and reuse of directories
for n in range(int(param.get_descetization())):
# aiParams_tmp = default_params
aiParams_tmp = {}
"""
for param_ in optimizingParameters:
if param_.get_type() == "float":
aiParams_tmp[param_.get_name()] = str(float(param_.get_original()))
elif param_.get_type() == "integer":
aiParams_tmp[param_.get_name()] = str(int(param_.get_original()))
"""
# calculate param value for this iteration
if param.get_type() == "float":
paramValue = param.get_min() + (((param.get_max()-param.get_min())/param.get_descetization())*n)
elif param.get_type() == "integer":
paramValue = int(param.get_min() + (((param.get_max()-param.get_min())/param.get_descetization())*n))
aiParams_tmp[param.get_name()] = paramValue
tmp_params = []
tmp_param_names = []
for key, value in aiParams_tmp.items() :
tmp_params.append(value)
tmp_param_names.append(key)
# print tmp_params
# print tmp_param_names
experiements_param.append(copy.deepcopy(paramValue))
aiParams.append(copy.deepcopy(tmp_param_names))
experiements.append(experiements_param)
experiements_param = []
#TODO stuff here.
print "Number of experiments in-line: " + str(len(experiements))
print "Size of process pool: " + str(availableProcesses)
#print "experiements: " + str(experiements)
# print ""
experiements = list(itertools.product(experiements[0],experiements[1]))
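# Illustration with hypothetical values: if the first parameter sweep is
# [0.0, 5.0] and the second is [1, 2], itertools.product yields the four
# experiments (0.0, 1), (0.0, 2), (5.0, 1) and (5.0, 2).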
# print "Cross product: " + str(experiements)
# sys.exit()
#try:
processes_pool = ThreadPool(availableProcesses)
# results = processes_pool.apply(op.eval, experiements)
for item in experiements:
# this ensures the results come out in the same order that the experiments appear in this list.
processes_pool.apply_async(op.eval, args = (item, ), callback = op.log_result)
processes_pool.close()
processes_pool.join()
# print op._result_list
# Does not actually place an obstacle in the scenario because it comes from options.subspaceParams
control_result = steerStats.RunStats((0,0), options=options)
control_result = op._metric_func((0,0),results=control_result ,options=options)
# print "Control result: " + str(control_result)
op._result_list = control_result - numpy.array(op._result_list)
# print "Corrected results" + str( op._result_list)
# processes_pool = Pool(availableProcesses)
# results = processes_pool.map(op.eval, experiements)
# except :
# print "Multi-processing failed: "
op.set_experiements(experiements)
# results = map(op.eval, experiements)
print "Waited for subprocess"
# print "Results: " + str(zip(results,experiements))
# write all of the result to a file.
op.add_post_process_event(pped)
# create a heatmap from the results
op.add_post_process_event(ppeh)
# this post processing step should be added last
op.add_post_process_event(ppe)
op.post_process()
print "Done"
if __name__ == "__main__":
options = getOptions()
if options.optimizationAlgorithm == "CMA-ES":
OptimizeWithCMA()
elif options.optimizationAlgorithm == "midaco":
OptimizeWithMIDACO()
elif options.optimizationAlgorithm == "bruteForce":
OptimizeWithBruteForce()
elif options.optimizationAlgorithm == "CMA-ES-MixInt":
OptimizeWithCMA_ES_MixedInt()
elif options.optimizationAlgorithm == "NSGA2":
if options.multiObjRestartFile == "":
pop, stats = multiOptimizeNSGA2()
else:
pop, stats = multiOptimizeNSGA2(restart_=True)
print(stats)
pop.sort(key=lambda x: x.fitness.values)
import matplotlib.pyplot as plt
#
front = numpy.array([ind.fitness.values for ind in pop])
opt_log = open("SteerStatsOpt.log", "w")
saveMatrixToCVS(numpy.append(front, pop, axis=1), opt_log)
plt.scatter(front[:,0], front[:,1], c="b")
plt.axis("tight")
plt.show()
else :
print "Usage: python OptimizeAlgorithm --optimizationAlgorithm CMA-ES"
| gpl-3.0 |
rosswhitfield/javelin | tests/ase_api_compatibility_test.py | 1 | 6803 | """Test that the API of javelin.structure.Structure is compatible with that
of diffpy.Structure.Structure and ase.Atoms.
To maintain API compatibility between ASE and javelin structure
objects, the following methods must return the same thing:
get_scaled_positions
get_atomic_numbers
get_magnetic_moments
And be able to get the same unitcell from both structures.
"""
import pytest
import os
from numpy.testing import assert_array_equal, assert_array_almost_equal
from javelin.structure import Structure
ase = pytest.importorskip("ase")
def test_hex():
positions = [[0, 1, 0], [1, 1, 0], [1, 0, 0], [0, -1, 0], [-1, -1, 0], [-1, 0, 0]]
symbols = ['C']*6
unitcell = (1.4, 1.4, 1, 90, 90, 120)
hex_javelin = Structure(unitcell=unitcell,
symbols=symbols,
positions=positions)
hex_ase = ase.Atoms(symbols=symbols, scaled_positions=positions, cell=hex_javelin.unitcell.Binv)
assert len(hex_ase) == 6
# unitcell
assert_array_equal(hex_javelin.unitcell.cell,
ase.geometry.cell_to_cellpar(hex_ase.cell))
# get_atomic_numbers
assert_array_equal(hex_javelin.get_atomic_numbers(),
hex_ase.get_atomic_numbers())
# get_positions
assert_array_almost_equal(hex_javelin.get_positions(),
hex_ase.get_positions())
# get_scaled_positions
assert_array_almost_equal(hex_javelin.get_scaled_positions(),
hex_ase.get_scaled_positions())
def test_read_stru_znse():
from javelin.io import read_stru, read_stru_to_ase
filename = os.path.join(os.path.dirname(__file__), 'data', 'znse.cell')
znse_javelin = read_stru(filename)
znse_ase = read_stru_to_ase(filename)
assert len(znse_ase) == 2
assert_array_almost_equal(znse_ase.get_cell(), [[3.997, 0, 0],
[-1.9985, 3.461504, 0],
[0, 0, 6.501]])
assert_array_equal(znse_ase.get_scaled_positions(),
[[0.3333333, 0.6666667, 0.3671],
[0.3333333, 0.6666667, 0.]])
assert znse_ase.get_chemical_formula() == 'SeZn'
# unitcell
assert_array_almost_equal(znse_javelin.unitcell.cell,
ase.geometry.cell_to_cellpar(znse_ase.cell))
# get_atomic_numbers
assert_array_equal(znse_javelin.get_atomic_numbers(),
znse_ase.get_atomic_numbers())
# get_positions
# assert_array_almost_equal(znse_javelin.get_positions(),
# znse_ase.get_positions())
# get_scaled_positions
assert_array_almost_equal(znse_javelin.get_scaled_positions(),
znse_ase.get_scaled_positions())
def test_read_stru_pzn():
from javelin.io import read_stru, read_stru_to_ase
filename = os.path.join(os.path.dirname(__file__), 'data', 'pzn.stru')
pzn_javelin = read_stru(filename, starting_cell=(0, 0, 0))
pzn_ase = read_stru_to_ase(filename)
assert len(pzn_ase) == 15
assert_array_almost_equal(pzn_ase.get_cell(), [[4.06, 0, 0],
[0, 4.06, 0],
[0, 0, 4.06]])
assert pzn_ase.get_chemical_formula() == 'Nb2O9Pb3Zn'
# unitcell
assert_array_equal(pzn_javelin.unitcell.cell,
ase.geometry.cell_to_cellpar(pzn_ase.cell))
# get_atomic_numbers
assert_array_equal(pzn_javelin.get_atomic_numbers(),
pzn_ase.get_atomic_numbers())
# get_positions
assert_array_almost_equal(pzn_javelin.get_positions(),
pzn_ase.get_positions())
# get_scaled_positions
assert_array_almost_equal(pzn_javelin.get_scaled_positions(),
pzn_ase.get_scaled_positions())
def test_read_stru_missing_cell():
from javelin.io import read_stru, read_stru_to_ase
filename = os.path.join(os.path.dirname(__file__), 'data', 'missing_cell.cell')
c_javelin = read_stru(filename)
c_ase = read_stru_to_ase(filename)
assert len(c_ase) == 1
assert_array_equal(c_ase.get_cell(), [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
assert_array_equal(c_ase.get_scaled_positions(), [[0.5, 0., 0.25]])
assert c_ase.get_chemical_formula() == 'C'
# unitcell
assert_array_equal(c_javelin.unitcell.cell,
ase.geometry.cell_to_cellpar(c_ase.cell))
# get_atomic_numbers
assert_array_equal(c_javelin.get_atomic_numbers(),
c_ase.get_atomic_numbers())
# get_positions
assert_array_almost_equal(c_javelin.get_positions(),
c_ase.get_positions())
# get_scaled_positions
assert_array_almost_equal(c_javelin.get_scaled_positions(),
c_ase.get_scaled_positions())
def test_ase_to_javelin():
positions = [[0, 1, 0], [1, 1, 0], [1, 0, 0], [0, -1, 0], [-1, -1, 0], [-1, 0, 0]]
symbols = ['C']*6
unitcell = (1.4, 1.4, 1, 90, 90, 120)
hex_ase = ase.Atoms(symbols=symbols, scaled_positions=positions,
cell=ase.geometry.cellpar_to_cell(unitcell))
hex_javelin = Structure(hex_ase)
# unitcell
assert_array_equal(hex_javelin.unitcell.cell,
ase.geometry.cell_to_cellpar(hex_ase.cell))
# get_atomic_numbers
assert_array_equal(hex_javelin.get_atomic_numbers(),
hex_ase.get_atomic_numbers())
def test_javelin_to_ase():
positions = [[0, 1, 0], [1, 1, 0], [1, 0, 0], [0, -1, 0], [-1, -1, 0], [-1, 0, 0]]
symbols = ['C']*6
unitcell = (1.4, 1.4, 1, 90, 90, 120)
hex_javelin = Structure(symbols=symbols,
unitcell=unitcell,
positions=positions)
hex_ase = hex_javelin.to_ase()
# unitcell
assert_array_equal(hex_javelin.unitcell.cell,
ase.geometry.cell_to_cellpar(hex_ase.cell))
# get_atomic_numbers
assert_array_equal(hex_javelin.get_atomic_numbers(),
hex_ase.get_atomic_numbers())
def test_ase_plot_atoms():
matplotlib = pytest.importorskip("matplotlib")
matplotlib.use('Agg')
from ase.visualize.plot import plot_atoms
positions = [[0, 1, 0], [1, 1, 0], [1, 0, 0], [0, -1, 0], [-1, -1, 0], [-1, 0, 0]]
symbols = ['C']*6
unitcell = (1.4, 1.4, 1, 90, 90, 120)
structure = Structure(symbols=symbols,
unitcell=unitcell,
positions=positions)
ax = plot_atoms(structure)
assert isinstance(ax, matplotlib.axes.Subplot)
| mit |
PPKE-Bioinf/consensx.itk.ppke.hu | consensx/graph/correl_graph.py | 1 | 1656 | import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
def correl_graph(my_path, calced, experimental, graph_name):
"""X axis -> experimental values, Y axis -> calculated values
"calced" is a dict containing values for residues (as keys)
"experimental" is a list containing STR record objects"""
min_calc = min(calced.values())
max_calc = max(calced.values())
exp_values = []
for record in experimental:
exp_values.append(record.value)
min_exp = min(exp_values)
max_exp = max(exp_values)
miny = min(min_calc, min_exp) # get minimum value
maxy = max(max_calc, max_exp) # get maximum value
exp_line, calc_line = [], []
for i, j in enumerate(calced.keys()): # fetch data from arguments
calc = calced[j]
exp = experimental[i].value
exp_line.append(exp)
calc_line.append(calc)
diag = []
margin = int(abs(miny - maxy) * 0.05)
# test the narrowest ranges first so the broader < 10 case does not shadow them
if abs(miny - maxy) < 1:
margin = 0
elif abs(miny - maxy) < 2:
margin = 0.01
elif abs(miny - maxy) < 10:
margin = 0.3
maxy += margin
miny -= margin
for i in np.arange(miny, maxy * 1.42, 0.1): # draw graph diagonal
diag.append(i)
plt.figure(figsize=(6, 5), dpi=80)
plt.plot(diag, diag, linewidth=2.0, color='#FD6C6C', alpha=.7)
plt.plot(exp_line, calc_line, color='#027A8B', marker='o', linestyle='')
plt.axis([miny, maxy, miny, maxy])
plt.xlabel('experimental')
plt.ylabel('calculated')
plt.tight_layout(pad=1.08)
plt.savefig(my_path + "/" + graph_name, format="svg", transparent=True)
plt.close()
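# Minimal usage sketch (illustration only, not part of the original module).
# ``Record`` is a hypothetical stand-in for the project's STR record objects
# -- the function only needs a ``.value`` attribute -- and the output
# directory is assumed to exist.
if __name__ == '__main__':
    from collections import namedtuple
    Record = namedtuple('Record', 'value')
    calced = {1: 0.51, 2: 0.72, 3: 0.68}  # calculated values per residue
    experimental = [Record(0.55), Record(0.70), Record(0.66)]
    correl_graph('/tmp', calced, experimental, 'correl_example.svg')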
| mit |
aldian/tensorflow | tensorflow/contrib/distributions/python/ops/mixture_same_family.py | 9 | 14358 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The same-family Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class MixtureSameFamily(distribution.Distribution):
"""Mixture (same-family) distribution.
The `MixtureSameFamily` distribution implements a (batch of) mixture
distribution where all components are from different parameterizations of the
same distribution type. It is parameterized by a `Categorical` "selecting
distribution" (over `k` components) and a components distribution, i.e., a
`Distribution` with a rightmost batch shape (equal to `[k]`) which indexes
each (batch of) component.
#### Examples
```python
tfd = tf.contrib.distributions
### Create a mixture of two scalar Gaussians:
gm = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=[0.3, 0.7]),
components_distribution=tfd.Normal(
loc=[-1., 1], # One for each component.
scale=[0.1, 0.5])) # And same here.
gm.mean()
# ==> 0.4
gm.variance()
# ==> 1.018
# Plot PDF.
x = np.linspace(-2., 3., int(1e4), dtype=np.float32)
import matplotlib.pyplot as plt
plt.plot(x, gm.prob(x).eval());
### Create a mixture of two Bivariate Gaussians:
gm = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=[0.3, 0.7]),
components_distribution=tfd.MultivariateNormalDiag(
loc=[[-1., 1], # component 1
[1, -1]], # component 2
scale_identity_multiplier=[.3, .6]))
gm.mean()
# ==> array([ 0.4, -0.4], dtype=float32)
gm.covariance()
# ==> array([[ 1.119, -0.84],
# [-0.84, 1.119]], dtype=float32)
# Plot PDF contours.
def meshgrid(x, y=None):
y = x if y is None else y
[gx, gy] = np.meshgrid(x, y, indexing='ij')
gx, gy = np.float32(gx), np.float32(gy)
grid = np.concatenate([gx.ravel()[None, :], gy.ravel()[None, :]], axis=0)
return grid.T.reshape(x.size, y.size, 2)
grid = meshgrid(np.linspace(-2, 2, 100, dtype=np.float32))
plt.contour(grid[..., 0], grid[..., 1], gm.prob(grid).eval());
```
"""
def __init__(self,
mixture_distribution,
components_distribution,
validate_args=False,
allow_nan_stats=True,
name="MixtureSameFamily"):
"""Construct a `MixtureSameFamily` distribution.
Args:
mixture_distribution: `tf.distributions.Categorical`-like instance.
Manages the probability of selecting components. The number of
categories must match the rightmost batch dimension of the
`components_distribution`. Must have either scalar `batch_shape` or
`batch_shape` matching `components_distribution.batch_shape[:-1]`.
components_distribution: `tf.distributions.Distribution`-like instance.
Right-most batch dimension indexes components.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: `if not mixture_distribution.dtype.is_integer`.
ValueError: if mixture_distribution does not have scalar `event_shape`.
ValueError: if `mixture_distribution.batch_shape` and
`components_distribution.batch_shape[:-1]` are both fully defined and
the former is neither scalar nor equal to the latter.
ValueError: if `mixture_distribution` categories does not equal
`components_distribution` rightmost batch shape.
"""
parameters = locals()
with ops.name_scope(name):
self._mixture_distribution = mixture_distribution
self._components_distribution = components_distribution
self._runtime_assertions = []
s = components_distribution.event_shape_tensor()
self._event_ndims = (s.shape[0].value
if s.shape.with_rank_at_least(1)[0].value is not None
else array_ops.shape(s)[0])
if not mixture_distribution.dtype.is_integer:
raise ValueError(
"`mixture_distribution.dtype` ({}) is not over integers".format(
mixture_distribution.dtype.name))
if (mixture_distribution.event_shape.ndims is not None
and mixture_distribution.event_shape.ndims != 0):
raise ValueError("`mixture_distribution` must have scalar `event_dim`s")
elif validate_args:
self._runtime_assertions += [
control_flow_ops.assert_has_rank(
mixture_distribution.event_shape_tensor(), 0,
message="`mixture_distribution` must have scalar `event_dim`s"),
]
mdbs = mixture_distribution.batch_shape
cdbs = components_distribution.batch_shape.with_rank_at_least(1)[:-1]
if mdbs.is_fully_defined() and cdbs.is_fully_defined():
if mdbs.ndims != 0 and mdbs != cdbs:
raise ValueError(
"`mixture_distribution.batch_shape` (`{}`) is not "
"compatible with `components_distribution.batch_shape` "
"(`{}`)".format(mdbs.as_list(), cdbs.as_list()))
elif validate_args:
mdbs = mixture_distribution.batch_shape_tensor()
cdbs = components_distribution.batch_shape_tensor()[:-1]
self._runtime_assertions += [
control_flow_ops.assert_equal(
distribution_util.pick_vector(
mixture_distribution.is_scalar_batch(), cdbs, mdbs),
cdbs,
message=(
"`mixture_distribution.batch_shape` is not "
"compatible with `components_distribution.batch_shape`"))]
km = mixture_distribution.logits.shape.with_rank_at_least(1)[-1].value
kc = components_distribution.batch_shape.with_rank_at_least(1)[-1].value
if km is not None and kc is not None and km != kc:
raise ValueError("`mixture_distribution components` ({}) does not "
"equal `components_distribution.batch_shape[-1]` "
"({})".format(km, kc))
elif validate_args:
km = array_ops.shape(mixture_distribution.logits)[-1]
kc = components_distribution.batch_shape_tensor()[-1]
self._runtime_assertions += [
control_flow_ops.assert_equal(
km, kc,
message=("`mixture_distribution components` does not equal "
"`components_distribution.batch_shape[-1:]`")),
]
elif km is None:
km = array_ops.shape(mixture_distribution.logits)[-1]
self._num_components = km
super(MixtureSameFamily, self).__init__(
dtype=self._components_distribution.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
self._mixture_distribution._graph_parents # pylint: disable=protected-access
+ self._components_distribution._graph_parents), # pylint: disable=protected-access
name=name)
@property
def mixture_distribution(self):
return self._mixture_distribution
@property
def components_distribution(self):
return self._components_distribution
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.batch_shape_tensor()[:-1]
def _batch_shape(self):
return self.components_distribution.batch_shape.with_rank_at_least(1)[:-1]
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.event_shape_tensor()
def _event_shape(self):
return self.components_distribution.event_shape
def _sample_n(self, n, seed):
with ops.control_dependencies(self._runtime_assertions):
x = self.components_distribution.sample(n) # [n, B, k, E]
# TODO(jvdillon): Consider using tf.gather (by way of index unrolling).
npdt = x.dtype.as_numpy_dtype
mask = array_ops.one_hot(
indices=self.mixture_distribution.sample(n), # [n, B]
depth=self._num_components, # == k
on_value=np.ones([], dtype=npdt),
off_value=np.zeros([], dtype=npdt)) # [n, B, k]
mask = self._pad_mix_dims(mask) # [n, B, k, [1]*e]
return math_ops.reduce_sum(
x * mask, axis=-1 - self._event_ndims) # [n, B, E]
def _log_prob(self, x):
with ops.control_dependencies(self._runtime_assertions):
x = self._pad_sample_dims(x)
log_prob_x = self.components_distribution.log_prob(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, dim=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_prob_x + log_mix_prob, axis=-1) # [S, B]
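# Written out (standard mixture algebra, stated here for clarity):
# log p(x) = logsumexp_k [ log pi_k + log p_k(x) ],
# where pi_k are the mixture weights obtained by softmaxing
# mixture_distribution.logits and p_k is the k-th component density;
# reduce_logsumexp keeps this numerically stable.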
def _mean(self):
with ops.control_dependencies(self._runtime_assertions):
probs = self._pad_mix_dims(
self.mixture_distribution.probs) # [B, k, [1]*e]
return math_ops.reduce_sum(
probs * self.components_distribution.mean(),
axis=-1 - self._event_ndims) # [B, E]
def _log_cdf(self, x):
x = self._pad_sample_dims(x)
log_cdf_x = self.components_distribution.log_cdf(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, dim=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_cdf_x + log_mix_prob, axis=-1) # [S, B]
def _variance(self):
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = self._pad_mix_dims(
self.mixture_distribution.probs) # [B, k, [1]*e]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.variance(),
axis=-1 - self._event_ndims) # [B, E]
var_cond_mean = math_ops.reduce_sum(
probs * math_ops.squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-1 - self._event_ndims) # [B, E]
return mean_cond_var + var_cond_mean # [B, E]
def _covariance(self):
static_event_ndims = self.event_shape.ndims
if static_event_ndims != 1:
# Covariance is defined only for vector distributions.
raise NotImplementedError("covariance is not implemented")
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = self._pad_mix_dims(self._pad_mix_dims(
self.mixture_distribution.probs)) # [B, k, 1, 1]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.covariance(),
axis=-3) # [B, e, e]
var_cond_mean = math_ops.reduce_sum(
probs * _outer_squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-3) # [B, e, e]
return mean_cond_var + var_cond_mean # [B, e, e]
def _pad_sample_dims(self, x):
with ops.name_scope("pad_sample_dims", values=[x]):
ndims = x.shape.ndims if x.shape.ndims is not None else array_ops.rank(x)
shape = array_ops.shape(x)
d = ndims - self._event_ndims
x = array_ops.reshape(x, shape=array_ops.concat([
shape[:d], [1], shape[d:]], axis=0))
return x
def _pad_mix_dims(self, x):
with ops.name_scope("pad_mix_dims", values=[x]):
def _get_ndims(d):
if d.batch_shape.ndims is not None:
return d.batch_shape.ndims
return array_ops.shape(d.batch_shape_tensor())[0]
dist_batch_ndims = _get_ndims(self)
cat_batch_ndims = _get_ndims(self.mixture_distribution)
pad_ndims = array_ops.where(
self.mixture_distribution.is_scalar_batch(),
dist_batch_ndims,
dist_batch_ndims - cat_batch_ndims)
s = array_ops.shape(x)
x = array_ops.reshape(x, shape=array_ops.concat([
s[:-1],
array_ops.ones([pad_ndims], dtype=dtypes.int32),
s[-1:],
array_ops.ones([self._event_ndims], dtype=dtypes.int32),
], axis=0))
return x
def _outer_squared_difference(x, y):
"""Convenience function analogous to tf.squared_difference."""
z = x - y
return z[..., array_ops.newaxis, :] * z[..., array_ops.newaxis]
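# Element-wise, for z = x - y this builds the outer product
# out[..., i, j] = z[..., i] * z[..., j], i.e. (x - y)(x - y)^T, which the
# covariance computation above averages over the mixture components.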
| apache-2.0 |
meereeum/vANNilla-tf | train.py | 1 | 5133 | from __future__ import division
import pandas as pd
from config import config
from classes.data import DataIO, splitTrainValidate
from classes.model import Model
from classes.tune import GridSearch
def preprocess(file_in, target_label, outfiles):
"""Generate processed training and validation data from input file, and
save files necessary to process future test data.
Returns: (train, validate) as preprocessed DataIO objects
"""
df = pd.read_csv(file_in)
assert not df.isnull().any().any()
# record categorical targets for decoding test set
targets = list(pd.get_dummies(df[target_label]))
with open(outfiles['targets'], 'w') as f:
f.write(','.join(map(str, targets)))
# nested validation
train, validate = splitTrainValidate(df, perc_training=0.8)
# extract raw quantitative features mean, stddev from train set
# to use for all preprocessing
raw_features, _ = DataIO(train, target_label).splitXY()
bin_cols = raw_features.apply(DataIO.isBinary, axis=0)
quantitative = raw_features.loc[:, ~bin_cols]
params = (quantitative.mean(axis=0), quantitative.std(axis=0))
for k, param in zip(('preprocessing_means', 'preprocessing_stddevs'), params):
with open(outfiles[k], 'w') as f:
param.to_csv(f)
# preprocess features
return (DataIO(dataset, target_label, lambda x:
DataIO.gaussianNorm(x, *params))#, [-10, 10]
for dataset in (train, validate))
def trainWithEarlyStopping(train, validate, hyperparams, architecture, outfiles,
seed = None, num_cores = 0, verbose = False):
"""Build trained artificial neural net model using early-stopping with
validation set, and save files necessary for resurrection of tuned model
"""
# if not already set, size input & output nodes by data shape
if not architecture[0].nodes:
architecture[0] = architecture[0]._replace(nodes = train.n_features)
if not architecture[-1].nodes:
architecture[-1] = architecture[-1]._replace(nodes = train.n_targets)
model = Model(hyperparams, architecture)
data = {'train': train.stream(batchsize = hyperparams['n_minibatch'],
max_iter = hyperparams['epochs']),
'validate': validate.stream()}
val_accs = model.train(data, train.len_, logging = True, save = True,
outfile = outfiles['graph_def'], seed = seed,
num_cores = num_cores, verbose = verbose)
i, max_ = max(enumerate(val_accs, 1), key = lambda x: (x[1], x[0]))
print """
Validation accuracies: {}
BEST: {}
(at epoch {})""".format(val_accs, max_, i)
with open(outfiles['performance'], 'w') as f: # TODO ?
f.write("Estimated accuracy: {}".format(max_))
print "Trained model saved to: ", outfiles['graph_def']
def trainWithNestedCV(train, validate, d_hyperparams, d_architectures,
outfiles, seed = None, num_cores = 0, verbose = False):
"""Implement nested cross-validation to (1) use grid search of all
combinatorial possibilities for given hyperparameters and layer architecture
to tune artificial neural net model, and (2) generate trained model using
early-stopping with held-out validation set
"""
KWARGS = {'seed': seed, 'num_cores': num_cores, 'verbose': verbose}
# choose optimal hyperparams, architecture with k-fold cross-validation
tuner = GridSearch(d_hyperparams, d_architectures)
hyperparams, architecture = tuner.tuneParams(train, **KWARGS)
# train on full training set and use validation set for early-stopping
hyperparams['epochs'] += 100
trainWithEarlyStopping(train, validate, hyperparams, architecture, outfiles,
**KWARGS)
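# Hypothetical shape of the grids consumed by GridSearch (the real values
# live in config.HYPERPARAM_GRID / config.HIDDEN_LAYER_GRID; 'epochs' and
# 'n_minibatch' are keys read elsewhere in this module, 'learning_rate' is
# an assumed extra knob):
# HYPERPARAM_GRID = {'learning_rate': [0.01, 0.05],
#                    'n_minibatch': [64, 128],
#                    'epochs': [200]}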
if __name__ == '__main__':
train, validate = preprocess(file_in = config.TRAINING_DATA,
target_label = config.TARGET_LABEL,
outfiles = config.OUTFILES)
try:
trainWithNestedCV(train = train,
validate = validate,
d_hyperparams = config.HYPERPARAM_GRID,
d_architectures = config.HIDDEN_LAYER_GRID,
outfiles = config.OUTFILES,
seed = config.SEED,
num_cores = config.NUM_CORES,
verbose = config.VERBOSE)
except(AttributeError):
try:
trainWithEarlyStopping(train = train,
validate = validate,
hyperparams = config.HYPERPARAMS,
architecture = config.LAYERS,
outfiles = config.OUTFILES,
seed = config.SEED,
num_cores = config.NUM_CORES,
verbose = config.VERBOSE)
except(AttributeError):
print 'Must supply valid config for Model training'
raise
| mit |
idekerlab/cyrest-examples | py2cytoscape_doc/py2cytoscape/data/network_client.py | 1 | 10154 | # -*- coding: utf-8 -*-
"""
This class provides methods to create network objects from many data formats, and to get and delete networks in the current Cytoscape session.
"""
import os
import requests
import json
import pandas as pd
from . import HEADERS, SUID_LIST
from ..util import cytoscapejs as util
from ..util import util_networkx as nx_util
from ..util import util_igraph as ig_util
from ..util import dataframe as df_util
from ..util import util_numpy as np_util
JSON = 'json'
from .cynetwork import CyNetwork, check_response
class NetworkClient(object):
"""
This class provides methods to create network objects from many data formats, and to get and delete networks in the current Cytoscape session.
"""
def __init__(self, url, session=None):
self.__url = url + 'networks'
# Using a persistent session object, so we don't have to create one for every request.
self.session = session if session is not None else requests.Session()
def create_from(self, locations=None, collection=None):
"""
By using this method, you can load network, session, or table files from a list of URLs or local file paths and visualize the networks in Cytoscape.
You can also set the collection parameter to put the networks into the collection you want.
:param locations: list of local file paths or URLs (a single location is also accepted).
:param collection: string. The collection name.
:return : If you input a list of locations, you will get a list of Cytoscape network objects.
If you input a single location, you will get one Cytoscape network object.
"""
if locations is None:
raise ValueError('Locations parameter is required.')
input_type = type(locations)
if input_type is list or input_type is tuple or input_type is set:
location_list = []
for loc in locations:
if not str(loc).startswith('http'):
location_list.append(self.__to_file_url(loc))
else:
location_list.append(loc)
else:
if not str(locations).startswith('http'):
location_list = [self.__to_file_url(locations)]
else:
location_list = [locations]
if collection is None:
collection_name = 'Created from resources'
else:
collection_name = collection
parameters = {
'collection': collection_name,
'source': 'url'
}
res = self.session.post(self.__url, data=json.dumps(location_list),
params=parameters, headers=HEADERS)
check_response(res)
res = res.json()
if len(res) == 1:
network_ids = res[0]['networkSUID']
if len(network_ids) == 1:
return CyNetwork(network_ids[0], session=self.session,
url=self.__url)
else:
return [CyNetwork(suid, session=self.session, url=self.__url) for
suid
in
network_ids]
else:
result_dict = {entry['source']: CyNetwork(entry['networkSUID'],
session=self.session,
url=self.__url)
for entry in res}
return pd.Series(result_dict)
def __to_file_url(self, file_name):
local_file = os.path.abspath(file_name)
return 'file:///' + local_file
def create(self, suid=None, name=None, collection=None,
data=None):
"""
This method creates a Cytoscape object from an existing network,
or creates a new network from the given parameters and returns its Cytoscape object.
:param suid: If you want to create a Cytoscape object from an existing network, pass that network's SUID.
If you omit this parameter, a new network is created from the remaining parameters instead.
:param name: The network name. This parameter is only valid when you don't pass the suid parameter.
:param collection: The collection name. This parameter is only valid when you don't pass the suid parameter.
:param data: The network data in Cytoscape.js format. This parameter is only valid when you don't pass the suid parameter.
:return : The Cytoscape object.
"""
if suid is not None:
# fetch existing network
res = self.session.get(self.__url)
existing_networks = res.json()
if suid in existing_networks:
network_id = suid
else:
raise ValueError('No such network')
else:
if data is None:
network_data = util.get_empty_network()
else:
network_data = data
if name is not None:
network_data['data']['name'] = name
if collection is None:
network_collection = 'From cyREST'
else:
network_collection = collection
res = self.session.post(self.__url + '?collection=' + network_collection,
data=json.dumps(network_data), headers=HEADERS)
check_response(res)
result = res.json()
network_id = result['networkSUID']
return CyNetwork(network_id, session=self.session, url=self.__url)
def create_from_networkx(self, network, name=None, collection=None):
"""
This method creates a network from a NetworkX graph, loads it into Cytoscape and returns the Cytoscape object.
:param network: The NetworkX graph object.
:param name: The name of the network that will be created in Cytoscape.
:param collection: The collection name.
:return : Cytoscape object
"""
if network is None:
raise ValueError('NetworkX graph object is required.')
cyjs = nx_util.from_networkx(network)
return self.create(name=name, collection=collection, data=cyjs)
def create_from_igraph(self, network, name=None, collection=None):
"""
This method creates a network from an igraph object, loads it into Cytoscape and returns the Cytoscape object.
:param network: The igraph object.
:param name: The network name that you will create in Cytoscape.
:param collection: The collection name.
:return : Cytoscape object
"""
if network is None:
raise ValueError('igraph object is required.')
cyjs = ig_util.from_igraph(network)
return self.create(name=name, collection=collection, data=cyjs)
def create_from_ndarray(self, matrix, name=None, labels=None,
collection=None, weighted=False):
"""
This method creates a network from a 2D ndarray (adjacency matrix), loads it into Cytoscape and returns the Cytoscape object.
:param matrix: ndarray data.
:param name: The network name.
:param labels: The labels
:param collection: The collection name
:param weighted: If this value is True, the edges will be weighted.
If this value is False, the edges will be unweighted.
:return : Cytoscape object.
"""
if matrix is None:
raise ValueError('2D ndarray object is required.')
cyjs = np_util.from_ndarray(matrix, name, labels, weighted=weighted)
return self.create(name=name, collection=collection, data=cyjs)
def create_from_dataframe(self, dataframe, source_col='source',
target_col='target', interaction_col='interaction',
name='Created from DataFrame', collection=None):
"""
This method creates a network from a pandas DataFrame.
It loads the edge table into Cytoscape and returns the Cytoscape object.
:param dataframe: the network data
:param source_col: the source column name in data frame.
:param target_col: the target column name in data frame.
:param interaction_col: the interaction column name in data frame.
:param name: the network name that you will create from data frame.
:param collection: The collection name.
:return : The cytoscape object.
"""
if dataframe is None:
raise ValueError('DataFrame object is required.')
# Convert from DataFrame to Cytoscape.js JSON
cyjs = df_util.from_dataframe(dataframe, source_col=source_col,
target_col=target_col, interaction_col=interaction_col,
name=name)
return self.create(collection=collection, data=cyjs)
def get_all(self, format=SUID_LIST):
"""
You can get all of the networks in the current session, either as a list of SUIDs or as JSON.
:param format: The default value is SUID_LIST. You can also set this parameter to JSON.
:return : all networks in the current session, in the requested format
"""
if format is SUID_LIST:
result = self.session.get(self.__url)
elif format is JSON:
result = self.session.get(self.__url + '.json')
else:
raise ValueError('Unsupported format type: ' + format)
return result.json()
def get(self, id):
"""
You can pass a network SUID and get that network's data.
:param id: the network SUID
:return : the network in JSON format.
"""
return self.session.get(self.__url + '/' + str(id)).json()
def delete_all(self):
"""
Delete all networks in the current Cytoscape session.
"""
self.session.delete(self.__url)
def delete(self, cynetwork):
"""
Delete the network corresponding to the given CyNetwork object.
:param cynetwork: The CyNetwork object whose network you want to delete.
"""
id = cynetwork.get_id()
self.session.delete(self.__url + '/' + str(id))
del cynetwork
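# Minimal usage sketch (illustration only, not part of the original module).
# It assumes a running Cytoscape instance with cyREST listening on the
# default local port; the base URL below is an assumption.
if __name__ == '__main__':
    client = NetworkClient('http://localhost:1234/v1/')
    edges = pd.DataFrame({'source': ['a', 'b', 'c'],
                          'target': ['b', 'c', 'a'],
                          'interaction': ['binds', 'binds', 'activates']})
    net = client.create_from_dataframe(edges, name='toy triangle')
    print(net.get_id())
    print(client.get_all())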
| mit |
gewaltig/cython-neuron | pynest/examples/twoneurons.py | 2 | 1175 | #! /usr/bin/env python
# twoneurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import matplotlib
# matplotlib.use("macosx")
import pylab
import nest
import nest.voltage_trace
weight=20.0
delay=1.0
stim=1000.0
neuron1 = nest.Create("iaf_neuron")
neuron2 = nest.Create("iaf_neuron")
voltmeter = nest.Create("voltmeter")
nest.SetStatus(neuron1, {"I_e": stim})
nest.Connect(neuron1,neuron2,weight,delay)
nest.Connect(voltmeter, neuron2)
nest.Simulate(100.0)
nest.voltage_trace.from_device(voltmeter)
nest.voltage_trace.show()
| gpl-2.0 |
dopplershift/siphon | siphon/simplewebservice/wyoming.py | 1 | 4132 | # Copyright (c) 2013-2015 University Corporation for Atmospheric Research/Unidata.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
"""Read upper air data from the Wyoming archives."""
from io import StringIO
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from .._tools import get_wind_components
from ..http_util import HTTPEndPoint
class WyomingUpperAir(HTTPEndPoint):
"""Download and parse data from the University of Wyoming's upper air archive."""
def __init__(self):
"""Set up endpoint."""
super(WyomingUpperAir, self).__init__('http://weather.uwyo.edu/cgi-bin/sounding')
@classmethod
def request_data(cls, time, site_id, **kwargs):
r"""Retrieve upper air observations from the Wyoming archive.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
endpoint = cls()
df = endpoint._get_data(time, site_id, **kwargs)
return df
def _get_data(self, time, site_id, region='naconf'):
r"""Download and parse upper air observations from an online archive.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
region
Region to request data from
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
raw_data = self._get_data_raw(time, site_id, region)
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(raw_data, skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
# Add unit dictionary
df.units = {'pressure': 'hPa',
'height': 'meter',
'temperature': 'degC',
'dewpoint': 'degC',
'direction': 'degrees',
'speed': 'knot',
'u_wind': 'knot',
'v_wind': 'knot'}
return df
def _get_data_raw(self, time, site_id, region='naconf'):
"""Download data from the University of Wyoming's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
region : str
The region in which the station resides. Defaults to `naconf`.
Returns
-------
a file-like object from which to read the data
"""
path = ('?region={region}&TYPE=TEXT%3ALIST'
'&YEAR={time:%Y}&MONTH={time:%m}&FROM={time:%d%H}&TO={time:%d%H}'
'&STNM={stid}').format(region=region, time=time, stid=site_id)
resp = self.get_path(path)
# See if the return is valid, but has no data
if resp.text.find('Can\'t') != -1:
raise ValueError(
'No data available for {time:%Y-%m-%d %HZ} from region {region} '
'for station {stid}.'.format(time=time, region=region,
stid=site_id))
soup = BeautifulSoup(resp.text, 'html.parser')
return StringIO(soup.find_all('pre')[0].contents[0])
| mit |
PeterRochford/SkillMetrics | Examples/taylor13.py | 1 | 5511 | '''
How to create a Taylor diagram with no centered RMSD contours
A thirteenth example of how to create a Taylor diagram given one set of
reference observations and two sets of model predictions for the quantity.
This is a variant of example 12, but where the centered RMSD contours
are suppressed.
This example shows how to display multiple data sets on the same Taylor
diagram where a different color marker is used for each data set to
identify its source. This is accomplished by overlaying the points from
the second data set onto the Taylor diagram created using the first data
set. Three data sets are used in this example where one is the reference
and the other two are model predictions. This example also shows how to
specify the legend using a dictionary instead of a list.
The data sets are yearly time series for years 2001-2014, each stored as
a list in a dictionary having a key of the form 'spi_2001', 'spi_2002', etc.
There is a separate dictionary for each of the observation data set and the
two model predictions. Each dictionary is written to its own pickle file.
A different file suffix is used depending upon whether the file is created
using Python 2 (.pkl) or Python 3 (.pkl3) because the pickle package is not
cross version compatible for pickle files containing dictionaries.
The data in these files are statistics calculated from yearly time series of
Standard Precipitation Index value over the Mekong basin, a trans-boundary
river in Southeast Asia that originates in the Tibetan Plateau and runs
through China's Yunnan Province, Myanmar, Laos, Thailand, Cambodia, and Vietnam.
The data sources are the ERA5 climate reanalysis dataset from the European
Centre for Medium-Range Weather Forecasts (ECMWF) and the Tropical Rainfall
Measuring Mission (TRMM 3B42 v7) satellite data, whilst the observation data is
the Asian Precipitation - Highly-Resolved Observational Data Integration
Towards Evaluation (APHRODITE V1801R1) rain-gauge data. All the statistics for
the yearly time series are calculated as a function of the year, i.e. 2001 ERA5
and TRMM are calculated using APHRODITE 2001, 2002 ERA5 and TRMM are calculated
using APHRODITE 2002, etc.
Note that the centered RMSD contours are suppressed for this Taylor diagram.
This is important because the origin for the RMSD contours is specified by the
standard deviation of the observations as dictated by the Taylor relationship.
While statistics for each data point can be calculated using observations for
that year and displayed on the diagram, there is no universal set of RMSD contours
to correctly indicate the centered RMSD values of the different points, because
each point is associated with a different observation standard deviation, and
hence each has a different set of RMS contours with its own distinct origin. To
show statistics respect to reference time series of the same year, one must
suppress the RMSD contours, as otherwise it would provide a misleading indication
of the centered RMSD values.
This data was provided courtesy of Iacopo Ferrario, Resources Scientist,
HR Wallingford, Flood and Water Resources group, Wallingford Oxfordshire,
United Kingdom
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
Created on Feb 26, 2019
@author: [email protected]
'''
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pickle
import skill_metrics as sm
from sys import version_info
def load_obj(name):
# Load object from file in pickle format
if version_info[0] == 2:
suffix = 'pkl'
else:
suffix = 'pkl3'
with open(name + '.' + suffix, 'rb') as f:
return pickle.load(f) # Python2 succeeds
class Container(object):
def __init__(self, target_stats1, target_stats2, taylor_stats1, taylor_stats2):
self.target_stats1 = target_stats1
self.target_stats2 = target_stats2
self.taylor_stats1 = taylor_stats1
self.taylor_stats2 = taylor_stats2
if __name__ == '__main__':
# Set the figure properties (optional)
rcParams["figure.figsize"] = [6.0, 4.8]
rcParams['lines.linewidth'] = 1 # line width for plots
rcParams.update({'font.size': 12}) # font size of axes text
# Close any previously open graphics windows
# ToDo: fails to work within Eclipse
plt.close('all')
# Read Taylor statistics for ERA Interim (stats1) and TRMM (stats2)
# data with respect to APHRODITE observations for each of years 2001 to
# 2014 from pickle file
stats = load_obj('Mekong_Basin_data_interannual') # observations
# Specify labels for points in a dictionary because only desire labels
# for each data set.
label = {'ERA-5': 'r', 'TRMM': 'b'}
'''
Produce the Taylor diagram for the first dataset
'''
sm.taylor_diagram(stats.taylor_stats1['sdev'],
stats.taylor_stats1['crmsd'],
stats.taylor_stats1['ccoef'], markercolor ='r', alpha = 0.0,
titleRMS = 'off', showlabelsRMS = 'off', tickRMS =[0.0])
'''
Overlay the second dataset
'''
sm.taylor_diagram(stats.taylor_stats2['sdev'],
stats.taylor_stats2['crmsd'],
stats.taylor_stats2['ccoef'], markercolor ='b', alpha = 0.0,
overlay = 'on', markerLabel = label)
# Write plot to file
plt.savefig('taylor13.png',dpi=150,facecolor='w')
# Show plot
plt.show()
| gpl-3.0 |
sonnyhu/scikit-learn | sklearn/metrics/regression.py | 31 | 17366 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
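# Illustrative note (added): for the 1-D case the helper above simply promotes
# both arrays to column vectors, e.g.
#
#     _check_reg_targets([3, -0.5, 2, 7], [2.5, 0.0, 2, 8], 'uniform_average')
#
# returns ('continuous', a (4, 1) y_true, a (4, 1) y_pred, 'uniform_average'),
# while an array-like ``multioutput`` is only accepted when there is more than
# one output and its length matches the number of outputs.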
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
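# Added arithmetic check for the multioutput doctest above: with
# y_true = [[0.5, 1], [-1, 1], [7, -6]] and y_pred = [[0, 2], [-1, 2], [8, -5]]
# the per-output mean absolute errors are [0.5, 1.0], so the custom weights
# [0.3, 0.7] give 0.3 * 0.5 + 0.7 * 1.0 = 0.85, printed as 0.849... only
# because of floating point rounding.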
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
        A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
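# Added arithmetic check for the doctest above: the absolute errors are
# [0.5, 0.5, 0.0, 1.0], whose median is 0.5.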
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
    Best possible score is 1.0; lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
    would get an R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted'; this behaviour is
deprecated since version 0.17 and will be changed to 'uniform_average'
starting from 0.19.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value which is deprecated since "
"0.17, it will be changed to 'uniform_average' "
"starting from 0.19.",
DeprecationWarning)
multioutput = 'variance_weighted'
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
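# Added illustration (not part of the original module) of how r2_score and
# explained_variance_score defined above differ: a constant prediction offset
# leaves the explained variance untouched (the mean residual is subtracted in
# its numerator) but is penalised by R^2.  With y_true = [1, 2, 3] and
# y_pred = [2, 3, 4]:
#
#     explained_variance_score(y_true, y_pred)  ->  1.0
#     r2_score(y_true, y_pred)                  ->  1 - 3/2 = -0.5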
| bsd-3-clause |
openmichigan/PSNM | PythonPrograms/Programs/PythonCode/NLSsplitting2D.py | 1 | 2992 | """
A program to solve the 2D Nonlinear Schrodinger equation using a
second order splitting method
More information on visualization can be found on the Mayavi
website, in particular:
http://github.enthought.com/mayavi/mayavi/mlab.html
which was last checked on 6 April 2012
"""
import math
import numpy
from mayavi import mlab
import matplotlib.pyplot as plt
import time
# Grid
Lx=4.0 # Period 2*pi*Lx
Ly=4.0 # Period 2*pi*Ly
Nx=64 # Number of harmonics
Ny=64 # Number of harmonics
Nt=100 # Number of time slices
tmax=1.0 # Maximum time
dt=tmax/Nt # time step
plotgap=10 # time steps between plots
Es= 1.0 # focusing (+1) or defocusing (-1) parameter
numplots=Nt/plotgap # number of plots to make
x = [i*2.0*math.pi*(Lx/Nx) for i in xrange(-Nx/2,1+Nx/2)]
y = [i*2.0*math.pi*(Ly/Ny) for i in xrange(-Ny/2,1+Ny/2)]
k_x = (1.0/Lx)*numpy.array([complex(0,1)*n for n in range(0,Nx/2) \
+ [0] + range(-Nx/2+1,0)])
k_y = (1.0/Ly)*numpy.array([complex(0,1)*n for n in range(0,Ny/2) \
+ [0] + range(-Ny/2+1,0)])
k2xm=numpy.zeros((Nx,Ny), dtype=float)
k2ym=numpy.zeros((Nx,Ny), dtype=float)
xx=numpy.zeros((Nx,Ny), dtype=float)
yy=numpy.zeros((Nx,Ny), dtype=float)
for i in xrange(Nx):
for j in xrange(Ny):
k2xm[i,j] = numpy.real(k_x[i]**2)
k2ym[i,j] = numpy.real(k_y[j]**2)
xx[i,j]=x[i]
yy[i,j]=y[j]
# allocate arrays
usquared=numpy.zeros((Nx,Ny), dtype=float)
pot=numpy.zeros((Nx,Ny), dtype=float)
u=numpy.zeros((Nx,Ny), dtype=complex)
una=numpy.zeros((Nx,Ny), dtype=complex)
unb=numpy.zeros((Nx,Ny), dtype=complex)
v=numpy.zeros((Nx,Ny), dtype=complex)
vna=numpy.zeros((Nx,Ny), dtype=complex)
vnb=numpy.zeros((Nx,Ny), dtype=complex)
mass=numpy.zeros((Nx,Ny), dtype=complex)
test=numpy.zeros((numplots-1),dtype=float)
tdata=numpy.zeros((numplots-1), dtype=float)
u=numpy.exp(-(xx**2 + yy**2 ))
v=numpy.fft.fftn(u)
usquared=abs(u)**2
src = mlab.surf(xx,yy,usquared,colormap='YlGnBu',warp_scale='auto')
mlab.scalarbar()
mlab.xlabel('x',object=src)
mlab.ylabel('y',object=src)
mlab.zlabel('abs(u)^2',object=src)
# initial mass
usquared=abs(u)**2
mass=numpy.fft.fftn(usquared)
ma=numpy.real(mass[0,0])
print(ma)
maO=ma
t=0.0
tdata[0]=t
plotnum=0
#solve pde and plot results
for nt in xrange(numplots-1):
for n in xrange(plotgap):
vna=v*numpy.exp(complex(0,0.5)*dt*(k2xm+k2ym))
una=numpy.fft.ifftn(vna)
usquared=abs(una)**2
pot=Es*usquared
unb=una*numpy.exp(complex(0,-1)*dt*pot)
vnb=numpy.fft.fftn(unb)
v=vnb*numpy.exp(complex(0,0.5)*dt*(k2xm+k2ym) )
u=numpy.fft.ifftn(v)
t+=dt
plotnum+=1
usquared=abs(u)**2
src.mlab_source.scalars = usquared
mass=numpy.fft.fftn(usquared)
ma=numpy.real(mass[0,0])
test[plotnum-1]=numpy.log(abs(1-ma/maO))
print(test[plotnum-1])
tdata[plotnum-1]=t
plt.figure()
plt.plot(tdata,test,'r-')
plt.title('Time Dependence of Change in Mass')
plt.show()
| bsd-2-clause |
liberatorqjw/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
liberatorqjw/scikit-learn | sklearn/utils/estimator_checks.py | 5 | 35692 | from __future__ import print_function
import warnings
import sys
import traceback
import inspect
import pickle
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_skip_travis
from sklearn.base import (clone, ClusterMixin, ClassifierMixin)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if "n_iter" in params:
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_regressors_classifiers_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if not 'sparse' in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_transformer(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
if name == "KernelPCA":
transformer.remove_zero_eig = False
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
def check_transformer_sparse_data(name, Transformer):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
if name in ['Scaler', 'StandardScaler']:
transformer = Transformer(with_mean=False)
else:
transformer = Transformer()
set_fast_parameters(transformer)
# fit
try:
transformer.fit(X, y)
except TypeError as e:
if not 'sparse' in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
if issubclass(Estimator, ClusterMixin):
estimator.fit(X_train)
else:
estimator.fit(X_train, y)
except ValueError as e:
if not 'inf' in repr(e) and not 'NaN' in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
if issubclass(Estimator, ClusterMixin):
# All estimators except clustering algorithm
# support fitting with (optional) y
estimator.fit(X_train_finite)
else:
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if not 'inf' in repr(e) and not 'NaN' in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if not 'inf' in repr(e) and not 'NaN' in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_transformer_pickle(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
n_samples, n_features = X.shape
X = StandardScaler().fit_transform(X)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
if not hasattr(transformer, 'transform'):
return
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
random_state = np.random.RandomState(seed=12345)
y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit(X, y_).transform(X)
pickled_transformer = pickle.dumps(transformer)
unpickled_transformer = pickle.loads(pickled_transformer)
pickled_X_pred = unpickled_transformer.transform(X)
assert_array_almost_equal(pickled_X_pred, X_pred)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that partial_fit rejects a change in the number of features between calls.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if not 'class' in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.85)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict:
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict:
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_classifiers_input_shapes(name, Classifier):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
set_random_state(classifier)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
classifier.fit(X, y[:, np.newaxis])
assert_equal(len(w), 1)
assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_classifiers_pickle(name, Classifier):
X, y = make_blobs(random_state=0)
X, y = shuffle(X, y, random_state=7)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
pickled_classifier = pickle.dumps(classifier)
unpickled_classifier = pickle.loads(pickled_classifier)
pickled_y_pred = unpickled_classifier.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
regressor.predict(X)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
def check_regressors_pickle(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
regressor.fit(X, y_)
y_pred = regressor.predict(X)
# store old predictions
pickled_regressor = pickle.dumps(regressor)
unpickled_regressor = pickle.loads(pickled_regressor)
pickled_y_pred = unpickled_regressor.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_class_weight_classifiers(name, Classifier):
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.9)
def check_class_weight_auto_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='auto')
classifier.fit(X_train, y_train)
y_pred_auto = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_auto),
f1_score(y_test, y_pred))
def check_class_weight_auto_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset, so the default n_iter is unlikely to
        # be enough for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='auto')
coef_auto = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
mean_weight = (1. / 3 + 1. / 2) / 2
class_weight = {
1: 1. / 3 / mean_weight,
-1: 1. / 2 / mean_weight,
}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_auto, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
if name == 'MiniBatchDictLearning' or name == 'MiniBatchSparsePCA':
# FIXME
# for MiniBatchDictLearning and MiniBatchSparsePCA
estimator.batch_size = 1
set_fast_parameters(estimator)
set_random_state(estimator)
params = estimator.get_params()
estimator.fit(X, y)
new_params = estimator.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
def check_cluster_overwrite_params(name, Clustering):
X, y = make_blobs(random_state=0, n_samples=9)
with warnings.catch_warnings(record=True):
# catch deprecation warnings
clustering = Clustering()
set_fast_parameters(clustering)
params = clustering.get_params()
clustering.fit(X)
new_params = clustering.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
def check_sparsify_multiclass_classifier(name, Classifier):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Classifier()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_sparsify_binary_classifier(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(isinstance(estimator.set_params(), Estimator))
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
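# Illustrative note (added): the check above encodes the scikit-learn
# convention that ``__init__`` does nothing except store its keyword arguments
# unchanged, so that get_params()/set_params() and clone() can round-trip the
# constructor.  A hypothetical conforming estimator would look like
#
#     class MyEstimator(BaseEstimator):
#         def __init__(self, alpha=1.0):
#             self.alpha = alpha   # no validation, no derived attributes here
#
# with any validation deferred to fit(); ``MyEstimator`` and ``alpha`` are
# only placeholder names.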
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator, multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
| bsd-3-clause |
clemkoa/scikit-learn | examples/neighbors/plot_species_kde.py | 44 | 4025 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
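# Hedged follow-up (not part of the original example): because the trees were
# built with the haversine metric on [latitude, longitude] in radians, a single
# location can be scored with the last fitted estimator like this (the
# coordinates below are arbitrary placeholders):
#
#     point = np.radians([[-10.0, -60.0]])   # [lat, long] given in degrees
#     log_dens = kde.score_samples(point)    # score_samples returns log-density
#     dens = np.exp(log_dens)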
| bsd-3-clause |
iamkingmaker/trading-with-python | lib/bats.py | 78 | 3458 | #-------------------------------------------------------------------------------
# Name: BATS
# Purpose: get data from BATS exchange
#
# Author: jev
#
# Created: 17/08/2013
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import urllib
import re
import pandas as pd
import datetime as dt
import zipfile
import StringIO
from extra import ProgressBar
import os
import yahooFinance as yf
from string import Template
import numpy as np
def fileName2date( fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
m = re.findall('\d+',name)[0]
return dt.datetime.strptime(m,'%Y%m%d').date()
def date2fileName(date):
return 'BATSshvol%s.txt.zip' % date.strftime('%Y%m%d')
def downloadUrl(date):
s = Template('http://www.batstrading.com/market_data/shortsales/$year/$month/$fName-dl?mkt=bzx')
url = s.substitute(fName=date2fileName(date), year=date.year, month='%02d' % date.month)
return url
class BATS_Data(object):
def __init__(self, dataDir):
''' create class. dataDir: directory to which files are downloaded '''
self.dataDir = dataDir
self.shortRatio = None
self._checkDates()
def _checkDates(self):
''' update list of available dataset dates'''
self.dates = []
for fName in os.listdir(self.dataDir):
self.dates.append(fileName2date(fName))
def _missingDates(self):
''' check for missing dates based on spy data'''
print 'Getting yahoo data to determine business dates... ',
spy = yf.getHistoricData('SPY',sDate = (2010,1,1))
busDates = [d.date() for d in spy.index ]
print 'Date range: ', busDates[0] ,'-', busDates[-1]
missing = []
for d in busDates:
if d not in self.dates:
missing.append(d)
return missing
def updateDb(self):
print 'Updating database'
missing = self._missingDates()
for i, date in enumerate(missing):
source = downloadUrl(date)
dest = os.path.join(self.dataDir,date2fileName(date))
if not os.path.exists(dest):
print 'Downloading [%i/%i]' %(i,len(missing)), source
urllib.urlretrieve(source, dest)
else:
print 'x',
print 'Update done.'
self._checkDates()
def loadDate(self,date):
fName = os.path.join(self.dataDir, date2fileName(date))
zipped = zipfile.ZipFile(fName) # open zip file
lines = zipped.read(zipped.namelist()[0]) # read first file from to lines
buf = StringIO.StringIO(lines) # create buffer
df = pd.read_csv(buf,sep='|',index_col=1,parse_dates=False,dtype={'Date':object,'Short Volume':np.float32,'Total Volume':np.float32})
s = df['Short Volume']/df['Total Volume']
s.name = dt.datetime.strptime(df['Date'][-1],'%Y%m%d')
return s
def loadData(self):
''' load data from zip files '''
data = []
pb = ProgressBar(len(self.dates)-1)
for idx, date in enumerate(self.dates):
data.append(self.loadDate(date))
pb.animate(idx)
self.shortRatio = pd.DataFrame(data)
return self.shortRatio
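#-------------------------------------------------------------------------------
# Usage sketch (added, not part of the original module); the data directory is
# a placeholder and must already exist:
#
#   bd = BATS_Data('D:/data/bats')   # directory holding the downloaded zip files
#   bd.updateDb()                    # fetch any missing business days
#   shortRatio = bd.loadData()       # DataFrame of short volume / total volume
#-------------------------------------------------------------------------------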
| bsd-3-clause |
bhargav/scikit-learn | sklearn/neighbors/regression.py | 7 | 10997 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
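# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The 'distance' weighting above reduces to a weighted mean of the neighbors'
# targets with weights 1/distance. The toy arrays below are invented purely to
# make that computation concrete.
def _distance_weighted_mean_sketch():
    import numpy as np
    toy_dist = np.array([[0.5, 1.0, 2.0]])  # distances from one query to 3 neighbors
    toy_y = np.array([1.0, 2.0, 4.0])       # targets of those neighbors
    w = 1.0 / toy_dist                       # 'distance' weights
    pred = np.sum(toy_y * w, axis=1) / np.sum(w, axis=1)
    return pred                              # approx. array([1.714]); closer points dominate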
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
pompiduskus/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
    ----------
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
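# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# img_to_graph (defined above) links 4-connected pixels and weights each edge by
# the absolute intensity difference; the diagonal holds the pixel values. The
# 2x2 image below is a made-up toy input.
def _img_to_graph_sketch():
    import numpy as np
    img = np.array([[0., 1.],
                    [2., 3.]])
    graph = img_to_graph(img)   # scipy.sparse COO matrix, shape (4, 4)
    return graph.toarray()      # off-diagonal entries are |intensity differences|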
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
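# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# grid_to_graph (defined above) builds the same connectivity without an image:
# every edge weight and every diagonal entry is 1.
def _grid_to_graph_sketch():
    adjacency = grid_to_graph(n_x=2, n_y=2)  # sparse COO matrix, shape (4, 4)
    # Expected edges on a 2x2 grid: (0, 1), (2, 3), (0, 2) and (1, 3).
    return adjacency.toarray()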
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
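# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The strided-view trick used by extract_patches, shown on a 1-D array with
# plain numpy: the view costs O(1) and shares memory; only a reshape/copy
# materializes the patches.
def _strided_view_sketch():
    import numpy as np
    from numpy.lib.stride_tricks import as_strided
    arr = np.arange(6)                                    # [0 1 2 3 4 5]
    step = arr.strides[0]
    view = as_strided(arr, shape=(4, 3), strides=(step, step))
    # view is [[0 1 2], [1 2 3], [2 3 4], [3 4 5]] without copying any data.
    return view.copy()                                    # the copy materializes the patches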
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
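# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Round trip through the two helpers defined above: extracting overlapping
# patches and averaging them back should recover the original image exactly.
def _patch_round_trip_sketch():
    import numpy as np
    image = np.arange(16, dtype=np.float64).reshape(4, 4)
    patches = extract_patches_2d(image, (2, 2))            # shape (9, 2, 2)
    rebuilt = reconstruct_from_patches_2d(patches, image.shape)
    return np.allclose(rebuilt, image)                     # expected: True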
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
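# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# PatchExtractor (defined above) applies extract_patches_2d to a whole batch of
# images; fit() is a no-op because there is nothing to learn. Toy data below.
def _patch_extractor_sketch():
    import numpy as np
    X = np.arange(2 * 4 * 4, dtype=np.float64).reshape(2, 4, 4)  # two 4x4 "images"
    extractor = PatchExtractor(patch_size=(2, 2))
    patches = extractor.fit(X).transform(X)
    return patches.shape                                          # expected: (18, 2, 2)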
| bsd-3-clause |
adrn/StreamMorphology | scripts/ensemble/analyze.py | 1 | 2836 | # coding: utf-8
""" Analyze the output from KLD mapping """
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os
# Third-party
from astropy import log as logger
import matplotlib.pyplot as plt
import numpy as np
# Project
from streammorphology.ensemble import read
def main(path, vbounds=None):
# read in initial conditions
w0 = np.load(os.path.join(path, 'w0.npy'))
norbits = len(w0)
# read freqmap output
cache_filename = os.path.join(path, 'allkld.dat')
d = read(cache_filename, norbits=len(w0))
logger.info("{} total orbits".format(norbits))
logger.info("\t{} successful".format(d['success'].sum()))
logger.info("\t{} not successful".format((~d['success']).sum()))
good_ix = d['success']
dens_map_t = d['thresh_t'] / 1000.
dens_map_t = dens_map_t[good_ix]
# color scaling
if vbounds is None:
vmin = dens_map_t.min()
vmax = dens_map_t.max()
else:
vmin,vmax = vbounds
# plot initial condition grid, colored by fractional diffusion rate
fig,ax = plt.subplots(1,1,figsize=(9.75,8))
ax.set_xlim(0, max([w0[:,0].max(),w0[:,2].max()]))
ax.set_ylim(*ax.get_xlim())
# automatically determine symbol size
xy_pixels = ax.transData.transform(np.vstack([w0[:,0],w0[:,2]]).T)
xpix, ypix = xy_pixels.T
# In matplotlib, 0,0 is the lower left corner, whereas it's usually the upper
    # left for most image software, so we'll flip the y-coords
width, height = fig.canvas.get_width_height()
ypix = height - ypix
# this assumes that your data-points are equally spaced
sz = max((xpix[1]-xpix[0])**2, (ypix[1]-ypix[0])**2)
# plot bad points
ax.scatter(w0[~good_ix,0], w0[~good_ix,2], c='r', s=sz, marker='s')
# plot good points, colored
c = ax.scatter(w0[good_ix,0], w0[good_ix,2], c=dens_map_t,
vmin=vmin, vmax=vmax, cmap='Greys', s=sz, marker='s')
ax.set_xlabel(r'$x_0$ $[{\rm kpc}]$')
ax.set_ylabel(r'$z_0$ $[{\rm kpc}]$')
fig.colorbar(c)
fig.tight_layout()
fig.savefig(os.path.join(path,"kld_map.pdf"))
if __name__ == '__main__':
from argparse import ArgumentParser
# Define parser object
parser = ArgumentParser(description="")
parser.add_argument("-p", "--path", dest="path", required=True,
help="Path to a Numpy memmap file containing the results "
"of frequency mapping.")
parser.add_argument("--vbounds", dest="vbounds", default=None, type=str,
help="bounds of color scale")
args = parser.parse_args()
if args.vbounds is not None:
vbounds = map(float, args.vbounds.split(","))
else:
vbounds = None
main(args.path, vbounds=vbounds)
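# Example invocation (hypothetical path and values, shown for illustration only):
#   python analyze.py --path /path/to/kld_run --vbounds 0.5,2.0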
| mit |
SMAPPNYU/smapp-toolkit | smapp_toolkit/twitter/figure_helpers.py | 2 | 2933 | """
Module contains helper functions for plotting.
@jonathanronen 4/2015
"""
import warnings
try:
    import numpy as np
except:
    warnings.warn("Cannot import numpy. Plotting will not work.")
try:
import seaborn as sns
import matplotlib.pyplot as plt
except:
warnings.warn("Error importing plotting libraries (seaborn and matplotlib). Plotting functionality will not work.")
from datetime import datetime, timedelta
def plot_histo(d, *args, **kwargs):
count_by = kwargs.pop('count_by', 'minutes')
key_format = kwargs.pop('key_format', '%Y-%m-%d %H:%M')
start_time = datetime.strptime(sorted(d.keys())[0], key_format)
end_time = datetime.strptime(sorted(d.keys())[-1], key_format)
    if count_by == 'minutes':
        step, n_seconds = timedelta(minutes=1), 60
    elif count_by == 'hours':
        step, n_seconds = timedelta(hours=1), 60 * 60
    elif count_by == 'days':
        step, n_seconds = timedelta(days=1), 60 * 60 * 24
    else:
        raise Exception("Can't plot histogram by {}. Legal values are ['minutes', 'hours', 'days'].".format(count_by))
    n_buckets = int(np.ceil((end_time - start_time).total_seconds() / n_seconds))
    t, y = zip(*[(start_time + i * step,
                  d.get((start_time + i * step).strftime(key_format), 0))
                 for i in range(n_buckets)])
plt.plot(t,y, *args, **kwargs)
return t,y
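# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# plot_histo (defined above) expects a dict keyed by formatted timestamps with
# per-bucket counts. The toy counts below are invented; matplotlib/seaborn must
# have imported successfully for the plot call to work.
def _plot_histo_sketch():
    counts = {
        '2015-01-01 18:10': 3,
        '2015-01-01 18:11': 5,
        '2015-01-01 18:12': 2,
    }
    t, y = plot_histo(counts, count_by='minutes', key_format='%Y-%m-%d %H:%M')
    plt.show()
    return t, y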
def term_counts_histogram(data, key_format, count_by, plot_total=True):
"""
Function to make histogram plot for data created using `term_counts()`.
If the data has the format:
{
'2015-01-01 18:10': {
'justin': 12,
'miley': 33
},
'2015-01-01 18:11': {
'justin': 11,
'miley': 9
}
}
Then to make the plot, call
figure_helpers.term_counts_histogram(data, '%Y-%m-%d %H:%M', count_by='minutes')
------------------------------------------------
Legal values for count_by are ['days', 'hours', 'minutes']
and the `key_format` is the strftime string for the keys of the data dict.
"""
colors = sns.color_palette('hls', len(data[data.keys()[0]].keys()))
terms = data[data.keys()[0]].keys()
terms.remove('_total')
for c, term in zip(colors,terms):
t,y = plot_histo({k : data[k][term] for k in data}, label=term, color=c, count_by=count_by, key_format=key_format)
if plot_total:
plot_histo({k: data[k]['_total'] for k in data}, label='total', color='grey', linestyle='--', count_by=count_by, key_format=key_format)
plt.legend()
plt.xticks(t[::len(t)/10],
[ts.strftime(key_format) for ts in t[::len(t)/10]],
rotation=45)
plt.tight_layout()
| gpl-2.0 |
devanshdalal/scikit-learn | sklearn/neighbors/lof.py | 33 | 12186 | # Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from warnings import warn
from scipy.stats import scoreatpercentile
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import UnsupervisedMixin
from ..utils.validation import check_is_fitted
from ..utils import check_array
__all__ = ["LocalOutlierFactor"]
class LocalOutlierFactor(NeighborsBase, KNeighborsMixin, UnsupervisedMixin):
"""Unsupervised Outlier Detection using Local Outlier Factor (LOF)
The anomaly score of each sample is called Local Outlier Factor.
It measures the local deviation of density of a given sample with
respect to its neighbors.
It is local in that the anomaly score depends on how isolated the object
is with respect to the surrounding neighborhood.
More precisely, locality is given by k-nearest neighbors, whose distance
is used to estimate the local density.
By comparing the local density of a sample to the local densities of
its neighbors, one can identify samples that have a substantially lower
density than their neighbors. These are considered outliers.
Parameters
----------
n_neighbors : int, optional (default=20)
Number of neighbors to use by default for :meth:`kneighbors` queries.
If n_neighbors is larger than the number of samples provided,
all samples will be used.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:ref:`sklearn.metrics.pairwise.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric used for the distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If 'precomputed', the training input X is expected to be a distance
matrix.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics:
http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. When fitting this is used to define the
threshold on the decision function.
n_jobs : int, optional (default=1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Attributes
----------
negative_outlier_factor_ : numpy array, shape (n_samples,)
The opposite LOF of the training samples. The lower, the more normal.
Inliers tend to have a LOF score close to 1, while outliers tend
to have a larger LOF score.
The local outlier factor (LOF) of a sample captures its
supposed 'degree of abnormality'.
It is the average of the ratio of the local reachability density of
a sample and those of its k-nearest neighbors.
n_neighbors_ : integer
The actual number of neighbors used for :meth:`kneighbors` queries.
References
----------
.. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
LOF: identifying density-based local outliers. In ACM sigmod record.
"""
def __init__(self, n_neighbors=20, algorithm='auto', leaf_size=30,
metric='minkowski', p=2, metric_params=None,
contamination=0.1, n_jobs=1):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs)
self.contamination = contamination
def fit_predict(self, X, y=None):
""""Fits the model to the training set X and returns the labels
(1 inlier, -1 outlier) on the training set according to the LOF score
and the contamination parameter.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
return self.fit(X)._predict()
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
Returns
-------
self : object
Returns self.
"""
if not (0. < self.contamination <= .5):
raise ValueError("contamination must be in (0, 0.5]")
super(LocalOutlierFactor, self).fit(X)
n_samples = self._fit_X.shape[0]
if self.n_neighbors > n_samples:
warn("n_neighbors (%s) is greater than the "
"total number of samples (%s). n_neighbors "
"will be set to (n_samples - 1) for estimation."
% (self.n_neighbors, n_samples))
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
self._distances_fit_X_, _neighbors_indices_fit_X_ = (
self.kneighbors(None, n_neighbors=self.n_neighbors_))
self._lrd = self._local_reachability_density(
self._distances_fit_X_, _neighbors_indices_fit_X_)
# Compute lof score over training samples to define threshold_:
lrd_ratios_array = (self._lrd[_neighbors_indices_fit_X_] /
self._lrd[:, np.newaxis])
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
self.threshold_ = -scoreatpercentile(
-self.negative_outlier_factor_, 100. * (1. - self.contamination))
return self
def _predict(self, X=None):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
If X is None, returns the same as fit_predict(X_train).
This method allows to generalize prediction to new observations (not
in the training set). As LOF originally does not deal with new data,
this method is kept private.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples. If None, makes prediction on the
training data without considering them as their own neighbors.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
check_is_fitted(self, ["threshold_", "negative_outlier_factor_",
"n_neighbors_", "_distances_fit_X_"])
if X is not None:
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self._decision_function(X) <= self.threshold_] = -1
else:
is_inlier = np.ones(self._fit_X.shape[0], dtype=int)
is_inlier[self.negative_outlier_factor_ <= self.threshold_] = -1
return is_inlier
def _decision_function(self, X):
"""Opposite of the Local Outlier Factor of X (as bigger is better,
i.e. large values correspond to inliers).
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
The decision function on training data is available by considering the
opposite of the negative_outlier_factor_ attribute.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : array, shape (n_samples,)
The opposite of the Local Outlier Factor of each input samples.
The lower, the more abnormal.
"""
check_is_fitted(self, ["threshold_", "negative_outlier_factor_",
"_distances_fit_X_"])
X = check_array(X, accept_sparse='csr')
distances_X, neighbors_indices_X = (
self.kneighbors(X, n_neighbors=self.n_neighbors_))
X_lrd = self._local_reachability_density(distances_X,
neighbors_indices_X)
lrd_ratios_array = (self._lrd[neighbors_indices_X] /
X_lrd[:, np.newaxis])
# as bigger is better:
return -np.mean(lrd_ratios_array, axis=1)
def _local_reachability_density(self, distances_X, neighbors_indices):
"""The local reachability density (LRD)
The LRD of a sample is the inverse of the average reachability
distance of its k-nearest neighbors.
Parameters
----------
distances_X : array, shape (n_query, self.n_neighbors)
Distances to the neighbors (in the training samples `self._fit_X`)
of each query point to compute the LRD.
neighbors_indices : array, shape (n_query, self.n_neighbors)
Neighbors indices (of each query point) among training samples
self._fit_X.
Returns
-------
local_reachability_density : array, shape (n_samples,)
The local reachability density of each sample.
"""
dist_k = self._distances_fit_X_[neighbors_indices,
self.n_neighbors_ - 1]
reach_dist_array = np.maximum(distances_X, dist_k)
        # 1e-10 to avoid `nan' when the number of duplicates > n_neighbors_:
return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)
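# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Typical use of the class defined above: fit_predict labels each training
# sample as inlier (+1) or outlier (-1). The toy data is invented; the isolated
# point at 10.0 should be flagged, but exact scores depend on the data.
def _local_outlier_factor_sketch():
    import numpy as np
    X = np.array([[0.0], [0.1], [0.2], [10.0]])
    lof = LocalOutlierFactor(n_neighbors=2, contamination=0.25)
    labels = lof.fit_predict(X)            # e.g. array([ 1,  1,  1, -1])
    scores = lof.negative_outlier_factor_  # lower (more negative) = more abnormal
    return labels, scores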
| bsd-3-clause |
petosegan/scikit-learn | sklearn/preprocessing/tests/test_label.py | 35 | 18559 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
@ignore_warnings
def test_label_binarizer_column_y():
# first for binary classification vs multi-label with 1 possible class
# lists are multi-label, array is multi-class :-/
inp_list = [[1], [2], [1]]
inp_array = np.array(inp_list)
multilabel_indicator = np.array([[1, 0], [0, 1], [1, 0]])
binaryclass_array = np.array([[0], [1], [0]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, multilabel_indicator)
assert_array_equal(out_2, binaryclass_array)
# second for multiclass classification vs multi-label with multiple
# classes
inp_list = [[1], [2], [1], [3]]
inp_array = np.array(inp_list)
# the indicator matrix output is the same in this case
indicator = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, out_2)
assert_array_equal(out_2, indicator)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pandas/io/clipboard.py | 14 | 2947 | """ io on the clipboard """
from pandas import compat, get_option, option_context, DataFrame
from pandas.compat import StringIO
def read_clipboard(**kwargs): # pragma: no cover
"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
If unspecified, `sep` defaults to '\s+'
Returns
-------
parsed : DataFrame
"""
from pandas.util.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text, encoding=(kwargs.get('encoding') or
get_option('display.encoding'))
)
except:
pass
    # Excel copies into the clipboard with \t separation.
    # Inspect no more than the first 10 lines; if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split('\n')[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count('\t') for x in lines])
if len(lines)>1 and len(counts) == 1 and counts.pop() != 0:
kwargs['sep'] = '\t'
if kwargs.get('sep') is None and kwargs.get('delim_whitespace') is None:
kwargs['sep'] = '\s+'
return read_table(StringIO(text), **kwargs)
def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows:
- OS X:
"""
from pandas.util.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = '\t'
buf = StringIO()
obj.to_csv(buf, sep=sep, **kwargs)
clipboard_set(buf.getvalue())
return
except:
pass
if isinstance(obj, DataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context('display.max_colwidth', 999999):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
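# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Round trip through the two helpers defined above. This only works when a
# clipboard backend is available (e.g. xclip or xsel on Linux); the frame is toy data.
def _clipboard_round_trip_sketch():
    df = DataFrame({'a': [1, 2], 'b': [3, 4]})
    to_clipboard(df, excel=True, sep='\t')  # tab-separated, pasteable into Excel
    return read_clipboard()                 # parses the clipboard text back into a DataFrame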
| gpl-2.0 |
raymondxyang/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 3 | 58763 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
    ValueError: Only one of `(x, y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
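# Illustrative note (not part of the original source): a call such as
#   input_fn, feed_fn = _get_input_fn(x=np.ones((10, 2)), y=np.zeros(10),
#                                     input_fn=None, feed_fn=None, batch_size=5)
# returns an input_fn that builds placeholder tensors for the data and a
# feed_fn that yields the matching feed dicts batch by batch.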
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
      features - Dictionary of string feature name to `Tensor`, or a single
        `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
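  Example:
    A minimal usage sketch (the random matrix below is illustrative only):
      x = np.random.rand(100, 3).astype(np.float32)
      feature_columns = infer_real_valued_columns_from_input(x)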
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
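# Illustration only (not part of the original source): given
#   def my_model_fn(features, labels, mode, params): ...
#   bound_fn = functools.partial(my_model_fn, params={'learning_rate': 0.1})
# _model_fn_args(bound_fn) returns ('features', 'labels', 'mode'), because
# 'params' is already bound through the partial's keywords.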
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
      `features`, `labels`, or `predictions` provided. Most commonly, a dict is
      given but no pred_name is specified.
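  Example:
    A typical `metrics` argument using the recommended `MetricSpec` form (the
    metric function and prediction key below are illustrative):
      metrics = {
          'accuracy': metric_spec.MetricSpec(
              metric_fn=metrics_lib.streaming_accuracy,
              prediction_key='classes'),
      }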
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
        raise ValueError('Invalid metric key {}. Expected a tuple of '
                         'length 2, got length {}.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = core_summary.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
value.simple_value = int(dictionary[key])
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, '
'np.int64, np.int32 or int.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
GraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',
['tags', 'transforms'])
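# Illustrative example (not part of the original source): a second, rewritten
# MetaGraphDef could be requested alongside the default one with something like
#   GraphRewriteSpec(tags=('serve', 'gpu'), transforms=('quantize_weights',))
# where the transform names must be ones accepted by meta_graph_transform.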
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use an `Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `None`, the model_dir
        in `config` will be used if set. If both are set, they must be the same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
        raise ValueError(
            "model_dir is set in both the constructor and RunConfig, but "
            "with different values. In constructor: '{}', in RunConfig: "
            "'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already '
                       'been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at the same time, or when the model takes a long time
    to converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
          Or if `metrics` is neither `None` nor a `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
      outputs: list of `str`, names of the outputs to predict.
        If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
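    Example:
      An illustrative consumption pattern when `as_iterable=True` (the
      `my_predict_input_fn` and `process` names are hypothetical):
        for prediction in estimator.predict(input_fn=my_predict_input_fn):
          # each `prediction` corresponds to a single input example
          process(prediction)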
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
        the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps == 0:
      logging.warning('evaluation steps are 0. If `input_fn` does not raise '
                      '`OutOfRangeError`, the evaluation will never stop. '
                      'Use steps=None if intended.')
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
core_summary.FileWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
            prediction. See `ModeKeys`.
          * `params`: Optional `dict` of hyperparameters. Will receive what
            is passed to Estimator in `params` parameter. This allows
            configuring Estimators from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
        Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
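    Example:
      A minimal `model_fn` sketch using the legacy tuple return; the layer,
      loss and optimizer choices below are illustrative, not prescribed by
      this class:
        def my_model_fn(features, labels, mode, params):
          predictions = tf.contrib.layers.fully_connected(features['x'], 1)
          loss = tf.losses.mean_squared_error(labels, predictions)
          train_op = tf.contrib.layers.optimize_loss(
              loss, tf.train.get_global_step(),
              learning_rate=params['learning_rate'], optimizer='SGD')
          return predictions, loss, train_op
        est = Estimator(model_fn=my_model_fn, params={'learning_rate': 0.1})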
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
  def _call_model_fn(self, features, labels, mode):
    """Calls model function with support of 2 to 6 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
graph_rewrite_specs=(GraphRewriteSpec((tag_constants.SERVING,), ()),)):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will
produce a separate MetaGraphDef within the exported SavedModel, tagged
and rewritten as specified. Defaults to a single entry using the
default serving tag ("serve") and no rewriting.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
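    Example:
      A hedged sketch of building a `serving_input_fn` from a feature spec via
      `saved_model_export_utils.build_parsing_serving_input_fn`; the feature
      name and shape below are illustrative:
        feature_spec = {'x': tf.FixedLenFeature([3], tf.float32)}
        serving_input_fn = (
            saved_model_export_utils.build_parsing_serving_input_fn(
                feature_spec))
        export_dir = estimator.export_savedmodel('/tmp/export', serving_input_fn)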
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# We'll write the SavedModel to a temporary directory and then atomically
# rename it at the end. This helps to avoid corrupt / incomplete outputs,
# which could otherwise occur if the job is preempted or otherwise fails
# in the middle of SavedModel creation.
temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
# Build the base graph
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
lookup_ops.tables_initializer())
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Export the first MetaGraphDef with variables, assets etc.
with tf_session.Session('') as session:
# pylint: disable=protected-access
saveables = variables._all_saveable_objects()
# pylint: enable=protected-access
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
elif saveables:
saver_for_restore = saver.Saver(saveables, sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# Perform the export
if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:
raise ValueError('The first element of graph_rewrite_specs '
'must specify no transforms.')
untransformed_tags = graph_rewrite_specs[0].tags
# TODO(soergel): switch to main_op or otherwise update when dust settles
builder.add_meta_graph_and_variables(
session, untransformed_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
# pylint: disable=protected-access
base_meta_graph_def = builder._saved_model.meta_graphs[0]
# pylint: enable=protected-access
if graph_rewrite_specs[1:]:
# Prepare the input_names and output_names needed for the
# meta_graph_transform call below.
input_names = [tensor.name
for input_dict in input_alternatives.values()
for tensor in input_dict.values()]
output_names = [tensor.name
for output_alternative in output_alternatives.values()
for tensor in output_alternative[1].values()]
# Write the additional MetaGraphDefs
for graph_rewrite_spec in graph_rewrite_specs[1:]:
# TODO(soergel) consider moving most of this to saved_model.builder_impl
# as e.g. builder.add_rewritten_meta_graph(rewritten_graph_def, tags)
transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
base_meta_graph_def, input_names, output_names,
graph_rewrite_spec.transforms, graph_rewrite_spec.tags)
# pylint: disable=protected-access
meta_graph_def = builder._saved_model.meta_graphs.add()
# pylint: enable=protected-access
meta_graph_def.CopyFrom(transformed_meta_graph_def)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
builder.save(as_text)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
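# Illustrative usage of SKCompat (not part of the original module): it keeps the
# scikit-learn style x/y interface on top of an Estimator, e.g.
#   est = SKCompat(Estimator(model_fn=my_model_fn, model_dir='/tmp/model'))
#   est.fit(x_train, y_train, batch_size=128, steps=1000)
#   eval_results = est.score(x_test, y_test)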
| apache-2.0 |
jundongl/scikit-feature | skfeature/example/test_gini_index.py | 3 | 1592 | import scipy.io
from sklearn import svm
from sklearn.metrics import accuracy_score
from skfeature.function.statistical_based import gini_index
from sklearn import cross_validation
def main():
# load data
mat = scipy.io.loadmat('../data/colon.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 100 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the gini_index score of each feature
score = gini_index.gini_index(X[train], y[train])
# rank features in descending order according to score
idx = gini_index.feature_ranking(score)
# obtain the dataset on the selected features
selected_features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(selected_features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(selected_features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main()
| gpl-2.0 |
pllim/astropy | astropy/modeling/physical_models.py | 4 | 24395 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Models that have physical origins.
"""
# pylint: disable=invalid-name, no-member
import warnings
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy import cosmology
from astropy.utils.exceptions import AstropyUserWarning
from .core import Fittable1DModel
from .parameters import Parameter, InputParameterError
__all__ = ["BlackBody", "Drude1D", "Plummer1D", "NFW"]
class BlackBody(Fittable1DModel):
"""
Blackbody model using the Planck function.
Parameters
----------
temperature : `~astropy.units.Quantity` ['temperature']
Blackbody temperature.
scale : float or `~astropy.units.Quantity` ['dimensionless']
Scale factor
Notes
-----
Model formula:
.. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}
Examples
--------
>>> from astropy.modeling import models
>>> from astropy import units as u
>>> bb = models.BlackBody(temperature=5000*u.K)
>>> bb(6000 * u.AA) # doctest: +FLOAT_CMP
<Quantity 1.53254685e-05 erg / (cm2 Hz s sr)>
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import BlackBody
from astropy import units as u
from astropy.visualization import quantity_support
bb = BlackBody(temperature=5778*u.K)
wav = np.arange(1000, 110000) * u.AA
flux = bb(wav)
with quantity_support():
plt.figure()
plt.semilogx(wav, flux)
plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--')
plt.show()
"""
# We parametrize this model with a temperature and a scale.
temperature = Parameter(default=5000.0, min=0, unit=u.K, description="Blackbody temperature")
scale = Parameter(default=1.0, min=0, description="Scale factor")
# We allow values without units to be passed when evaluating the model, and
# in this case the input x values are assumed to be frequencies in Hz.
_input_units_allow_dimensionless = True
# We enable the spectral equivalency by default for the spectral axis
input_units_equivalencies = {'x': u.spectral()}
def evaluate(self, x, temperature, scale):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency']
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz.
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless']
Desired scale for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``scale``.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
if not isinstance(temperature, u.Quantity):
in_temp = u.Quantity(temperature, u.K)
else:
in_temp = temperature
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(x, u.Hz, dtype=np.float64)
temp = u.Quantity(in_temp, u.K)
# check the units of scale and setup the output units
bb_unit = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr) # default unit
# use the scale that was used at initialization for determining the units to return
# to support returning the right units when fitting where units are stripped
if hasattr(self.scale, "unit") and self.scale.unit is not None:
            # check that the units on scale are convertible to surface brightness units
if not self.scale.unit.is_equivalent(bb_unit, u.spectral_density(x)):
raise ValueError(
f"scale units not surface brightness: {self.scale.unit}"
)
# use the scale passed to get the value for scaling
if hasattr(scale, "unit"):
mult_scale = scale.value
else:
mult_scale = scale
bb_unit = self.scale.unit
else:
mult_scale = scale
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError(f"Temperature should be positive: {temp}")
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn(
"Input contains invalid wavelength/frequency value(s)",
AstropyUserWarning,
)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
# Calculate blackbody flux
bb_nu = 2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1) / u.sr
y = mult_scale * bb_nu.to(bb_unit, u.spectral_density(freq))
# If the temperature parameter has no unit, we should return a unitless
# value. This occurs for instance during fitting, since we drop the
# units temporarily.
if hasattr(temperature, "unit"):
return y
return y.value
@property
def input_units(self):
# The input units are those of the 'x' value, which should always be
# Hz. Because we do this, and because input_units_allow_dimensionless
# is set to True, dimensionless values are assumed to be in Hz.
return {self.inputs[0]: u.Hz}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"temperature": u.K}
@property
def bolometric_flux(self):
"""Bolometric flux."""
# bolometric flux in the native units of the planck function
native_bolflux = (
self.scale.value * const.sigma_sb * self.temperature ** 4 / np.pi
)
# return in more "astro" units
return native_bolflux.to(u.erg / (u.cm ** 2 * u.s))
@property
def lambda_max(self):
"""Peak wavelength when the curve is expressed as power density."""
return const.b_wien / self.temperature
@property
def nu_max(self):
"""Peak frequency when the curve is expressed as power density."""
return 2.8214391 * const.k_B * self.temperature / const.h
class Drude1D(Fittable1DModel):
"""
    Drude model based on the behavior of electrons in materials (esp. metals).
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
Model formula:
    .. math:: f(x) = A \\frac{(fwhm/x_0)^2}{(x/x_0 - x_0/x)^2 + (fwhm/x_0)^2}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Drude1D
fig, ax = plt.subplots()
# generate the curves and plot them
x = np.arange(7.5 , 12.5 , 0.1)
dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0)
ax.plot(x, dmodel(x))
ax.set_xlabel('x')
ax.set_ylabel('F(x)')
plt.show()
"""
amplitude = Parameter(default=1.0, description="Peak Value")
x_0 = Parameter(default=1.0, description="Position of the peak")
fwhm = Parameter(default=1.0, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""
One dimensional Drude model function
"""
return (
amplitude
* ((fwhm / x_0) ** 2)
/ ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
)
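    # Note added for clarity: at x == x_0 the (x/x_0 - x_0/x) term vanishes,
    # so evaluate(x_0, amplitude, x_0, fwhm) reduces to amplitude for any fwhm.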
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""
Drude1D model function derivatives.
"""
d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
d_x_0 = (
-2
* amplitude
* d_amplitude
* (
(1 / x_0)
+ d_amplitude
* (x_0 ** 2 / fwhm ** 2)
* (
(-x / x_0 - 1 / x) * (x / x_0 - x_0 / x)
- (2 * fwhm ** 2 / x_0 ** 3)
)
)
)
d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
@x_0.validator
def x_0(self, val):
""" Ensure `x_0` is not 0."""
if val == 0:
raise InputParameterError("0 is not an allowed value for x_0")
def bounding_box(self, factor=50):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
class Plummer1D(Fittable1DModel):
r"""One dimensional Plummer density profile model.
Parameters
----------
mass : float
Total mass of cluster.
r_plum : float
Scale parameter which sets the size of the cluster core.
Notes
-----
Model formula:
.. math::
\rho(r)=\frac{3M}{4\pi a^3}(1+\frac{r^2}{a^2})^{-5/2}
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1911MNRAS..71..460P
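    Examples
    --------
    A minimal evaluation sketch (the numbers and units are illustrative)::

        from astropy import units as u
        from astropy.modeling.models import Plummer1D
        model = Plummer1D(mass=1.0e5 * u.M_sun, r_plum=5.0 * u.pc)
        rho = model(1.0 * u.pc)   # density at r = 1 pc, in M_sun / pc**3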
"""
mass = Parameter(default=1.0, description="Total mass of cluster")
r_plum = Parameter(default=1.0, description="Scale parameter which sets the size of the cluster core")
@staticmethod
def evaluate(x, mass, r_plum):
"""
Evaluate plummer density profile model.
"""
return (3*mass)/(4 * np.pi * r_plum**3) * (1+(x/r_plum)**2)**(-5/2)
@staticmethod
def fit_deriv(x, mass, r_plum):
"""
Plummer1D model derivatives.
"""
d_mass = 3 / ((4*np.pi*r_plum**3) * (((x/r_plum)**2 + 1)**(5/2)))
d_r_plum = (6*mass*x**2-9*mass*r_plum**2) / ((4*np.pi * r_plum**6) *
(1+(x/r_plum)**2)**(7/2))
return [d_mass, d_r_plum]
@property
def input_units(self):
if self.mass.unit is None and self.r_plum.unit is None:
return None
else:
return {self.inputs[0]: self.r_plum.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'mass': outputs_unit[self.outputs[0]] * inputs_unit[self.inputs[0]] ** 3,
'r_plum': inputs_unit[self.inputs[0]]}
class NFW(Fittable1DModel):
r"""
Navarro–Frenk–White (NFW) profile - model for radial distribution of dark matter.
Parameters
----------
mass : float or `~astropy.units.Quantity` ['mass']
Mass of NFW peak within specified overdensity radius.
concentration : float
Concentration of the NFW profile.
redshift : float
Redshift of the NFW profile.
massfactor : tuple or str
Mass overdensity factor and type for provided profiles:
Tuple version:
("virial",) : virial radius
("critical", N) : radius where density is N times that of the critical density
("mean", N) : radius where density is N times that of the mean density
String version:
"virial" : virial radius
"Nc" : radius where density is N times that of the critical density (e.g. "200c")
"Nm" : radius where density is N times that of the mean density (e.g. "500m")
cosmo : :class:`~astropy.cosmology.Cosmology`
Background cosmology for density calculation. If None, the default cosmology will be used.
Notes
-----
Model formula:
.. math:: \rho(r)=\frac{\delta_c\rho_{c}}{r/r_s(1+r/r_s)^2}
References
----------
.. [1] https://arxiv.org/pdf/astro-ph/9508025
.. [2] https://en.wikipedia.org/wiki/Navarro%E2%80%93Frenk%E2%80%93White_profile
.. [3] https://en.wikipedia.org/wiki/Virial_mass
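Examples
--------
A minimal usage sketch. The mass, concentration, redshift, and radius values
are arbitrary assumptions, and ``Planck15`` is just one possible choice of
background cosmology::

    import astropy.units as u
    from astropy.cosmology import Planck15

    nfw = NFW(mass=2.0e15 * u.M_sun, concentration=8.5, redshift=0.63,
              massfactor=("critical", 200), cosmo=Planck15)
    rho = nfw(300.0 * u.kpc)                       # mass density at r = 300 kpc
    v_circ = nfw.circular_velocity(300.0 * u.kpc)  # circular velocity in km / s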
"""
# Model Parameters
# NFW Profile mass
mass = Parameter(default=1.0, min=1.0, unit=u.M_sun,
description="Peak mass within specified overdensity radius")
# NFW profile concentration
concentration = Parameter(default=1.0, min=1.0, description="Concentration")
# NFW Profile redshift
redshift = Parameter(default=0.0, min=0.0, description="Redshift")
# We allow values without units to be passed when evaluating the model, and
# in this case the input r values are assumed to be lengths / positions in kpc.
_input_units_allow_dimensionless = True
def __init__(self, mass=u.Quantity(mass.default, mass.unit),
concentration=concentration.default, redshift=redshift.default,
massfactor=("critical", 200), cosmo=None, **kwargs):
# Set default cosmology
if cosmo is None:
cosmo = cosmology.default_cosmology.get()
# Set mass overdensity type and factor
self._density_delta(massfactor, cosmo, redshift)
# Establish mass units for density calculation (default solar masses)
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Obtain scale radius
self._radius_s(mass, concentration)
# Obtain scale density
self._density_s(mass, concentration)
super().__init__(mass=in_mass, concentration=concentration, redshift=redshift, **kwargs)
def evaluate(self, r, mass, concentration, redshift):
"""
One dimensional NFW profile function
Parameters
----------
r : float or `~astropy.units.Quantity` ['length']
Radial position of density to be calculated for the NFW profile.
mass : float or `~astropy.units.Quantity` ['mass']
Mass of NFW peak within specified overdensity radius.
concentration : float
Concentration of the NFW profile.
redshift : float
Redshift of the NFW profile.
Returns
-------
density : float or `~astropy.units.Quantity` ['density']
NFW profile mass density at location ``r``. The density units are:
[``mass`` / ``r`` ^3]
Notes
-----
.. warning::
Output values might contain ``nan`` and ``inf``.
"""
# Create radial version of input with dimension
if hasattr(r, "unit"):
in_r = r
else:
in_r = u.Quantity(r, u.kpc)
# Define reduced radius (r / r_{\\rm s})
# also update scale radius
radius_reduced = in_r / self._radius_s(mass, concentration).to(in_r.unit)
# Density distribution
# \rho (r)=\frac{\rho_0}{\frac{r}{R_s}\left(1~+~\frac{r}{R_s}\right)^2}
# also update scale density
density = self._density_s(mass, concentration) / (radius_reduced *
(u.Quantity(1.0) + radius_reduced) ** 2)
if hasattr(mass, "unit"):
return density
else:
return density.value
def _density_delta(self, massfactor, cosmo, redshift):
"""
Calculate density delta.
"""
# Set mass overdensity type and factor
if isinstance(massfactor, tuple):
# Tuple options
# ("virial") : virial radius
# ("critical", N) : radius where density is N that of the critical density
# ("mean", N) : radius where density is N that of the mean density
if massfactor[0].lower() == "virial":
# Virial Mass
delta = None
masstype = massfactor[0].lower()
elif massfactor[0].lower() == "critical":
# Critical or Mean Overdensity Mass
delta = float(massfactor[1])
masstype = 'c'
elif massfactor[0].lower() == "mean":
# Critical or Mean Overdensity Mass
delta = float(massfactor[1])
masstype = 'm'
else:
raise ValueError("Massfactor '" + str(massfactor[0]) + "' not one of 'critical', "
"'mean', or 'virial'")
else:
try:
# String options
# virial : virial radius
# Nc : radius where density is N that of the critical density
# Nm : radius where density is N that of the mean density
if massfactor.lower() == "virial":
# Virial Mass
delta = None
masstype = massfactor.lower()
elif massfactor[-1].lower() == 'c' or massfactor[-1].lower() == 'm':
# Critical or Mean Overdensity Mass
delta = float(massfactor[0:-1])
masstype = massfactor[-1].lower()
else:
raise ValueError("Massfactor " + str(massfactor) + " string not of the form "
"'#m', '#c', or 'virial'")
except (AttributeError, TypeError):
raise TypeError("Massfactor " + str(
massfactor) + " not a tuple or string")
# Set density from masstype specification
if masstype == "virial":
Om_c = cosmo.Om(redshift) - 1.0
d_c = 18.0 * np.pi ** 2 + 82.0 * Om_c - 39.0 * Om_c ** 2
self.density_delta = d_c * cosmo.critical_density(redshift)
elif masstype == 'c':
self.density_delta = delta * cosmo.critical_density(redshift)
elif masstype == 'm':
self.density_delta = delta * cosmo.critical_density(redshift) * cosmo.Om(redshift)
else:
raise ValueError("Invalid masstype '" + str(masstype) +
"'. Should be one of 'virial','c', or 'm'")
return self.density_delta
@staticmethod
def A_NFW(y):
r"""
Dimensionless volume integral of the NFW profile, used as an intermediate step in some
calculations for this model.
Notes
-----
Model formula:
.. math:: A_{NFW} = [\ln(1+y) - \frac{y}{1+y}]
"""
return np.log(1.0 + y) - (y / (1.0 + y))
def _density_s(self, mass, concentration):
"""
Calculate scale density of the NFW profile.
"""
# Enforce default units
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Calculate scale density
# M_{200} = 4\pi \rho_{s} R_{s}^3 \left[\ln(1+c) - \frac{c}{1+c}\right].
self.density_s = in_mass / (4.0 * np.pi * self._radius_s(in_mass, concentration) ** 3 *
self.A_NFW(concentration))
return self.density_s
@property
def rho_scale(self):
r"""
Scale density of the NFW profile. Often written in the literature as :math:`\rho_s`
"""
return self.density_s
def _radius_s(self, mass, concentration):
"""
Calculate scale radius of the NFW profile.
"""
# Enforce default units
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Delta Mass is related to delta radius by
# M_{200}=\frac{4}{3}\pi r_{200}^3 200 \rho_{c}
# And delta radius is related to the NFW scale radius by
# c = R / r_{\\rm s}
self.radius_s = (((3.0 * in_mass) / (4.0 * np.pi * self.density_delta)) ** (
1.0 / 3.0)) / concentration
# Set radial units to kiloparsec by default (unit will be rescaled by units of radius
# in evaluate)
return self.radius_s.to(u.kpc)
@property
def r_s(self):
"""
Scale radius of the NFW profile.
"""
return self.radius_s
@property
def r_virial(self):
"""
Mass factor defined virial radius of the NFW profile (R200c for M200c, Rvir for Mvir, etc.).
"""
return self.r_s * self.concentration
@property
def r_max(self):
"""
Radius of maximum circular velocity.
"""
return self.r_s * 2.16258
@property
def v_max(self):
"""
Maximum circular velocity.
"""
return self.circular_velocity(self.r_max)
def circular_velocity(self, r):
r"""
Circular velocities of the NFW profile.
Parameters
----------
r : float or `~astropy.units.Quantity` ['length']
Radial position of velocity to be calculated for the NFW profile.
Returns
-------
velocity : float or `~astropy.units.Quantity` ['speed']
NFW profile circular velocity at location ``r``. The velocity units are:
[km / s]
Notes
-----
Model formula:
.. math:: v_{circ}(r)^2 = \frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
.. math:: x = r/r_s
.. warning::
Output values might contain ``nan`` and ``inf``.
"""
# Enforce default units (if parameters are without units)
if hasattr(r, "unit"):
in_r = r
else:
in_r = u.Quantity(r, u.kpc)
# Mass factor defined velocity (i.e. V200c for M200c, Vvir for Mvir)
v_profile = np.sqrt(self.mass * const.G.to(in_r.unit**3 / (self.mass.unit * u.s**2)) /
self.r_virial)
# Define reduced radius (r / r_{\\rm s})
reduced_radius = in_r / self.r_virial.to(in_r.unit)
# Circular velocity given by:
# v^2=\frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
# where x=r/r_{200}
velocity = np.sqrt((v_profile**2 * self.A_NFW(self.concentration * reduced_radius)) /
(reduced_radius * self.A_NFW(self.concentration)))
return velocity.to(u.km / u.s)
@property
def input_units(self):
# The units for the 'r' variable should be a length (default kpc)
return {self.inputs[0]: u.kpc}
@property
def return_units(self):
# The units for the 'density' variable should be a matter density (default M_sun / kpc^3)
if (self.mass.unit is None) and (self.input_units[self.inputs[0]] is None):
return {self.outputs[0]: u.M_sun / u.kpc ** 3}
elif (self.mass.unit is None):
return {self.outputs[0]: u.M_sun / self.input_units[self.inputs[0]] ** 3}
elif (self.input_units[self.inputs[0]] is None):
return {self.outputs[0]: self.mass.unit / u.kpc ** 3}
else:
return {self.outputs[0]: self.mass.unit / self.input_units[self.inputs[0]] ** 3}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'mass': u.M_sun,
"concentration": None,
"redshift": None}
| bsd-3-clause |
renatolm/endometriose | mamdani_peta.py | 1 | 42588 | from __future__ import division
import numpy as np
import skfuzzy as fuzz
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def mamdani_defuzz(dism, disp, dor, cans):
dismenorreia = dism
dispareunia = disp
dorNasCostasPernas = dor
cansaco = cans
print("entradas: dismenorreia "+str(dismenorreia)+" dispareunia "+str(dispareunia)+" dorcp "+str(dorNasCostasPernas)+" cansaco "+str(cansaco))
def nivelLeve(entrada):
if entrada == 0:
return 1
elif entrada >= 4:
return 0
else:
return 1 - (entrada/4)
def nivelModerado(entrada):
if entrada == 5:
return 1
elif entrada < 5 and entrada > 1:
return (entrada/4) - (1/4)
elif entrada > 5 and entrada < 9:
return -(entrada/4) + (9/4)
else:
return 0
def nivelIntenso(entrada):
if entrada == 10:
return 1
elif entrada <=6:
return 0
else:
return (entrada/4) - (6/4)
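# Illustrative check of the piecewise-linear memberships above (input value
# chosen only for demonstration): for entrada = 3, nivelLeve(3) = 0.25,
# nivelModerado(3) = 0.5 and nivelIntenso(3) = 0.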
#Antecedentes
dismenorreia_dominio = np.arange(0,11,0.1) #nivel de dismenorreia (0 a 10)
dispareunia_dominio = np.arange(0,11,1) #nivel de dispareunia (0 a 10)
dorNasCostasPernas_dominio = np.arange(0,11,1) #nivel de dor nas costas/pernas (0 a 10)
cansaco_dominio = np.arange(0,11,1) #nivel de cansaco (0 a 10)
#Consequente
risco = np.arange(0,100,1) #nivel de risco de endometriose (0 a 100)
#Funcoes de pertinencia de dismenorreia
dismenorreia_leve = fuzz.trimf(dismenorreia_dominio, [0,0,3]) #dismenorreia leve
dismenorreia_moderada = fuzz.trimf(dismenorreia_dominio, [2,5,8]) #dismenorreia moderada
dismenorreia_intensa = fuzz.trimf(dismenorreia_dominio, [7,10,10]) #dismenorreia intensa
#Funcoes de pertinencia de dispareunia
dispareunia_leve = fuzz.trimf(dispareunia_dominio, [0,0,3]) #dispareunia leve
dispareunia_moderada = fuzz.trimf(dispareunia_dominio, [2,5,8]) #dispareunia moderada
dispareunia_intensa = fuzz.trimf(dispareunia_dominio, [7,10,10]) #dispareunia intensa
#Funcoes de pertinencia de Dor Nas Costas e Pernas
dorCP_leve = fuzz.trimf(dorNasCostasPernas_dominio, [0,0,3]) #dor nas costas e/ou pernas leve
dorCP_moderada = fuzz.trimf(dorNasCostasPernas_dominio, [2,5,8]) #dor nas costas e/ou pernas moderada
dorCP_intensa = fuzz.trimf(dorNasCostasPernas_dominio, [7,10,10]) #dor nas costas e/ou pernas intensa
#Funcoes de pertinencia de cansaco
cansaco_leve = fuzz.trimf(cansaco_dominio, [0,0,3]) #cansaco leve
cansaco_moderado = fuzz.trimf(cansaco_dominio, [2,5,8]) #cansaco moderada
cansaco_intenso = fuzz.trimf(cansaco_dominio, [7,10,10]) #cansaco intensa
#Funcoes de pertinencia do Risco de Endometriose
risco_improvavel = fuzz.trimf(risco, [0,0,25]) #risco de endometriose baixo
risco_poucoprovavel = fuzz.trimf(risco, [8,33,58]) #risco de endometriose medio
risco_provavel = fuzz.trimf(risco, [42,67,92]) #risco de endometriose alto
risco_muitoprovavel = fuzz.trimf(risco, [75,100,100]) #risco de endometriose muito alto
## Simulando uma entrada
#dismenorreia_nivel_leve = fuzz.interp_membership(dismenorreia_dominio, dismenorreia_leve, dismenorreia) #faz a intersecao da entrada (10) com a funcao de pertinencia da dismenorreia leve
dismenorreia_nivel_leve = nivelLeve(dismenorreia)
#dismenorreia_nivel_moderada = fuzz.interp_membership(dismenorreia_dominio, dismenorreia_moderada, dismenorreia) #faz a intersecao da entrada (10) com a funcao de pertinencia da dismenorreia moderada
dismenorreia_nivel_moderada = nivelModerado(dismenorreia)
#dismenorreia_nivel_intensa = fuzz.interp_membership(dismenorreia_dominio, dismenorreia_intensa, dismenorreia) #faz a intersecao da entrada (10) com a funcao de pertinencia da dismenorreia intensa
dismenorreia_nivel_intensa = nivelIntenso(dismenorreia)
#dispareunia_nivel_leve = fuzz.interp_membership(dispareunia_dominio, dispareunia_leve, dispareunia) #faz a intersecao da entrada (8) com a funcao de pertinencia da dispareunia leve
dispareunia_nivel_leve = nivelLeve(dispareunia)
#dispareunia_nivel_moderada = fuzz.interp_membership(dispareunia_dominio, dispareunia_moderada, dispareunia) #faz a intersecao da entrada (8) com a funcao de pertinencia da dispareunia moderada
dispareunia_nivel_moderada = nivelModerado(dispareunia)
#dispareunia_nivel_intensa = fuzz.interp_membership(dispareunia_dominio, dispareunia_intensa, dispareunia) #faz a intersecao da entrada (8) com a funcao de pertinencia da dispareunia instensa
dispareunia_nivel_intensa = nivelIntenso(dispareunia)
#dorCP_nivel_leve = fuzz.interp_membership(dorNasCostasPernas_dominio, dorCP_leve, dorNasCostasPernas) #faz a intersecao da entrada (8) com a funcao de pertinencia da dor nas costas/pernas leve
dorCP_nivel_leve = nivelLeve(dorNasCostasPernas)
#dorCP_nivel_moderada = fuzz.interp_membership(dorNasCostasPernas_dominio, dorCP_moderada, dorNasCostasPernas) #faz a intersecao da entrada (8) com a funcao de pertinencia da dor nas costas/pernas moderada
dorCP_nivel_moderada = nivelModerado(dorNasCostasPernas)
#dorCP_nivel_intensa = fuzz.interp_membership(dorNasCostasPernas_dominio, dorCP_intensa, dorNasCostasPernas) #faz a intersecao da entrada (8) com a funcao de pertinencia da dor nas costas/pernas intensa
dorCP_nivel_intensa = nivelIntenso(dorNasCostasPernas)
#cansaco_nivel_leve = fuzz.interp_membership(cansaco_dominio, cansaco_leve, cansaco) #faz a intersecao da entrada (9) com a funcao de pertinencia de cansaco leve
cansaco_nivel_leve = nivelLeve(cansaco)
#cansaco_nivel_moderado = fuzz.interp_membership(cansaco_dominio, cansaco_moderado, cansaco) #faz a intersecao da entrada (9) com a funcao de pertinencia de cansaco moderado
cansaco_nivel_moderado = nivelModerado(cansaco)
#cansaco_nivel_intenso = fuzz.interp_membership(cansaco_dominio, cansaco_intenso, cansaco) #faz a intersecao da entrada (9) com a funcao de pertinencia de cansaco intenso
cansaco_nivel_intenso = nivelIntenso(cansaco)
regras_ativas = []
## Base de regras
#Regra 1: dismenorreia leve; dispareunia leve; dor costas/pernas leve; cansaco leve => risco improvavel
ativa_regra1 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra1 = np.fmin(ativa_regra1, risco_improvavel) #implicacao
if regra1.any() != 0:
regras_ativas.append(1)
#Regra 2: dismenorreia leve; dispareunia leve; dor costas/pernas leve; cansaco moderado => risco improvavel
ativa_regra2 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra2 = np.fmin(ativa_regra2, risco_improvavel) #implicacao
if regra2.any() != 0:
regras_ativas.append(2)
#Regra 3: dismenorreia leve; dispareunia leve; dor costas/pernas leve; cansaco intenso => risco pouco provavel
ativa_regra3 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra3 = np.fmin(ativa_regra3, risco_poucoprovavel) #implicacao
if regra3.any() != 0:
regras_ativas.append(3)
#Regra 4: dismenorreia leve; dispareunia leve; dor costas/pernas moderado; cansaco leve => risco pouco provavel
ativa_regra4 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra4 = np.fmin(ativa_regra4, risco_poucoprovavel) #implicacao
if regra4.any() != 0:
regras_ativas.append(4)
#Regra 5: dismenorreia leve; dispareunia leve; dor costas/pernas moderado; cansaco moderado => risco pouco provavel
ativa_regra5 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra5 = np.fmin(ativa_regra5, risco_poucoprovavel) #implicacao
if regra5.any() != 0:
regras_ativas.append(5)
#Regra 6: dismenorreia leve; dispareunia leve; dor costas/pernas moderado; cansaco intenso => risco provavel
ativa_regra6 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra6 = np.fmin(ativa_regra6, risco_provavel) #implicacao
if regra6.any() != 0:
regras_ativas.append(6)
#Regra 7: dismenorreia leve; dispareunia leve; dor costas/pernas intenso; cansaco leve => risco pouco provavel
ativa_regra7 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra7 = np.fmin(ativa_regra7, risco_poucoprovavel) #implicacao
if regra7.any() != 0:
regras_ativas.append(7)
#Regra 8: dismenorreia leve; dispareunia leve; dor costas/pernas intenso; cansaco moderado => risco provavel
ativa_regra8 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra8 = np.fmin(ativa_regra8, risco_provavel) #implicacao
if regra8.any() != 0:
regras_ativas.append(8)
#Regra 9: dismenorreia leve; dispareunia leve; dor costas/pernas intenso; cansaco intenso => risco provavel
ativa_regra9 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra9 = np.fmin(ativa_regra9, risco_provavel) #implicacao
if regra9.any() != 0:
regras_ativas.append(9)
#Regra 10: dismenorreia leve; dispareunia moderado; dor costas/pernas leve; cansaco leve => risco pouco provavel
ativa_regra10 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra10 = np.fmin(ativa_regra10, risco_poucoprovavel) #implicacao
if regra10.any() != 0:
regras_ativas.append(10)
#Regra 11: dismenorreia leve; dispareunia moderado; dor costas/pernas leve; cansaco moderado => risco pouco provavel
ativa_regra11 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra11 = np.fmin(ativa_regra11, risco_poucoprovavel) #implicacao
if regra11.any() != 0:
regras_ativas.append(11)
#Regra 12: dismenorreia leve; dispareunia moderado; dor costas/pernas leve; cansaco intenso => risco provavel
ativa_regra12 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra12 = np.fmin(ativa_regra12, risco_provavel) #implicacao
if regra12.any() != 0:
regras_ativas.append(12)
#Regra 13: dismenorreia leve; dispareunia moderado; dor costas/pernas moderado; cansaco leve => risco provavel
ativa_regra13 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra13 = np.fmin(ativa_regra13, risco_provavel) #implicacao
if regra13.any() != 0:
regras_ativas.append(13)
#Regra 14: dismenorreia leve; dispareunia moderado; dor costas/pernas moderado; cansaco moderado => risco provavel
ativa_regra14 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra14 = np.fmin(ativa_regra14, risco_provavel) #implicacao
if regra14.any() != 0:
regras_ativas.append(14)
#Regra 15: dismenorreia leve; dispareunia moderado; dor costas/pernas moderado; cansaco intenso => risco provavel
ativa_regra15 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra15 = np.fmin(ativa_regra15, risco_provavel) #implicacao
if regra15.any() != 0:
regras_ativas.append(15)
#Regra 16: dismenorreia leve; dispareunia moderado; dor costas/pernas intenso; cansaco leve => risco pouco provavel
ativa_regra16 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra16 = np.fmin(ativa_regra16, risco_poucoprovavel) #implicacao
if regra16.any() != 0:
regras_ativas.append(16)
#Regra 17: dismenorreia leve; dispareunia moderado; dor costas/pernas intenso; cansaco moderado => risco provavel
ativa_regra17 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra17 = np.fmin(ativa_regra17, risco_provavel) #implicacao
if regra17.any() != 0:
regras_ativas.append(17)
#Regra 18: dismenorreia leve; dispareunia moderado; dor costas/pernas intenso; cansaco intenso => risco provavel
ativa_regra18 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra18 = np.fmin(ativa_regra18, risco_provavel) #implicacao
if regra18.any() != 0:
regras_ativas.append(18)
#Regra 19: dismenorreia leve; dispareunia intenso; dor costas/pernas leve; cansaco leve => risco provavel
ativa_regra19 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra19 = np.fmin(ativa_regra19, risco_provavel) #implicacao
if regra19.any() != 0:
regras_ativas.append(19)
#Regra 20: dismenorreia leve; dispareunia intenso; dor costas/pernas leve; cansaco moderado => risco provavel
ativa_regra20 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra20 = np.fmin(ativa_regra20, risco_provavel) #implicacao
if regra20.any() != 0:
regras_ativas.append(20)
#Regra 21: dismenorreia leve; dispareunia intenso; dor costas/pernas leve; cansaco intenso => risco provavel
ativa_regra21 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra21 = np.fmin(ativa_regra21, risco_provavel) #implicacao
if regra21.any() != 0:
regras_ativas.append(21)
#Regra 22: dismenorreia leve; dispareunia intenso; dor costas/pernas moderado; cansaco leve => risco provavel
ativa_regra22 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra22 = np.fmin(ativa_regra22, risco_provavel) #implicacao
if regra22.any() != 0:
regras_ativas.append(22)
#Regra 23: dismenorreia leve; dispareunia intenso; dor costas/pernas moderado; cansaco moderado => risco provavel
ativa_regra23 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra23 = np.fmin(ativa_regra23, risco_provavel) #implicacao
if regra23.any() != 0:
regras_ativas.append(23)
#Regra 24: dismenorreia leve; dispareunia intenso; dor costas/pernas moderado; cansaco intenso => risco provavel
ativa_regra24 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra24 = np.fmin(ativa_regra24, risco_provavel) #implicacao
if regra24.any() != 0:
regras_ativas.append(24)
#Regra 25: dismenorreia leve; dispareunia intenso; dor costas/pernas intenso; cansaco leve => risco provavel
ativa_regra25 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra25 = np.fmin(ativa_regra25, risco_provavel) #implicacao
if regra25.any() != 0:
regras_ativas.append(25)
#Regra 26: dismenorreia leve; dispareunia intenso; dor costas/pernas intenso; cansaco moderado => risco muito provavel
ativa_regra26 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra26 = np.fmin(ativa_regra26, risco_muitoprovavel) #implicacao
if regra26.any() != 0:
regras_ativas.append(26)
#Regra 27: dismenorreia leve; dispareunia intenso; dor costas/pernas intenso; cansaco intenso => risco muito provavel
ativa_regra27 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_leve, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra27 = np.fmin(ativa_regra27, risco_muitoprovavel) #implicacao
if regra27.any() != 0:
regras_ativas.append(27)
#Regra 28: dismenorreia moderado; dispareunia leve; dor costas/pernas leve; cansaco leve => risco improvavel
ativa_regra28 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra28 = np.fmin(ativa_regra28, risco_improvavel) #implicacao
if regra28.any() != 0:
regras_ativas.append(28)
#Regra 29: dismenorreia moderado; dispareunia leve; dor costas/pernas leve; cansaco moderado => risco improvavel
ativa_regra29 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra29 = np.fmin(ativa_regra29, risco_improvavel) #implicacao
if regra29.any() != 0:
regras_ativas.append(29)
#Regra 30: dismenorreia moderado; dispareunia leve; dor costas/pernas leve; cansaco intenso => risco provavel
ativa_regra30 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra30 = np.fmin(ativa_regra30, risco_provavel) #implicacao
if regra30.any() != 0:
regras_ativas.append(30)
#Regra 31: dismenorreia moderado; dispareunia leve; dor costas/pernas moderado; cansaco leve => risco provavel
ativa_regra31 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra31 = np.fmin(ativa_regra31, risco_provavel) #implicacao
if regra31.any() != 0:
regras_ativas.append(31)
#Regra 32: dismenorreia moderado; dispareunia leve; dor costas/pernas moderado; cansaco moderado => risco provavel
ativa_regra32 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra32 = np.fmin(ativa_regra32, risco_provavel) #implicacao
if regra32.any() != 0:
regras_ativas.append(32)
#Regra 33: dismenorreia moderado; dispareunia leve; dor costas/pernas moderado; cansaco intenso => risco muito provavel
ativa_regra33 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra33 = np.fmin(ativa_regra33, risco_muitoprovavel) #implicacao
if regra33.any() != 0:
regras_ativas.append(33)
#Regra 34: dismenorreia moderado; dispareunia leve; dor costas/pernas intenso; cansaco leve => muito provavel
ativa_regra34 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra34 = np.fmin(ativa_regra34, risco_muitoprovavel) #implicacao
if regra34.any() != 0:
regras_ativas.append(34)
#Regra 35: dismenorreia moderado; dispareunia leve; dor costas/pernas intenso; cansaco moderado => risco muito provavel
ativa_regra35 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra35 = np.fmin(ativa_regra35, risco_muitoprovavel) #implicacao
if regra35.any() != 0:
regras_ativas.append(35)
#Regra 36: dismenorreia moderado; dispareunia leve; dor costas/pernas intenso; cansaco intenso => risco muito provavel
ativa_regra36 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra36 = np.fmin(ativa_regra36, risco_muitoprovavel) #implicacao
if regra36.any() != 0:
regras_ativas.append(36)
#Regra 37: dismenorreia moderado; dispareunia moderado; dor costas/pernas leve; cansaco leve => risco provavel
ativa_regra37 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra37 = np.fmin(ativa_regra37, risco_provavel) #implicacao
if regra37.any() != 0:
regras_ativas.append(37)
#Regra 38: dismenorreia moderado; dispareunia moderado; dor costas/pernas leve; cansaco moderado => risco provavel
ativa_regra38 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra38 = np.fmin(ativa_regra38, risco_provavel) #implicacao
if regra38.any() != 0:
regras_ativas.append(38)
#Regra 39: dismenorreia moderado; dispareunia moderado; dor costas/pernas leve; cansaco intenso => risco provavel
ativa_regra39 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra39 = np.fmin(ativa_regra39, risco_provavel) #implicacao
if regra39.any() != 0:
regras_ativas.append(39)
#Regra 40: dismenorreia moderado; dispareunia moderado; dor costas/pernas moderado; cansaco leve => risco provavel
ativa_regra40 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra40 = np.fmin(ativa_regra40, risco_provavel) #implicacao
if regra40.any() != 0:
regras_ativas.append(40)
#Regra 41: dismenorreia moderado; dispareunia moderado; dor costas/pernas moderado; cansaco moderado => risco provavel
ativa_regra41 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra41 = np.fmin(ativa_regra41, risco_provavel) #implicacao
if regra41.any() != 0:
regras_ativas.append(41)
#Regra 42: dismenorreia moderado; dispareunia moderado; dor costas/pernas moderado; cansaco intenso => risco muito provavel
ativa_regra42 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra42 = np.fmin(ativa_regra42, risco_muitoprovavel) #implicacao
if regra42.any() != 0:
regras_ativas.append(42)
#Regra 43: dismenorreia moderado; dispareunia moderado; dor costas/pernas intenso; cansaco leve => risco provavel
ativa_regra43 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra43 = np.fmin(ativa_regra43, risco_provavel) #implicacao
if regra43.any() != 0:
regras_ativas.append(43)
#Regra 44: dismenorreia moderado; dispareunia moderado; dor costas/pernas intenso; cansaco moderado => risco provavel
ativa_regra44 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra44 = np.fmin(ativa_regra44, risco_provavel) #implicacao
if regra44.any() != 0:
regras_ativas.append(44)
#Regra 45: dismenorreia moderado; dispareunia moderado; dor costas/pernas intenso; cansaco intenso => risco muito provavel
ativa_regra45 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra45 = np.fmin(ativa_regra45, risco_muitoprovavel) #implicacao
if regra45.any() != 0:
regras_ativas.append(45)
#Regra 46: dismenorreia moderado; dispareunia intenso; dor costas/pernas leve; cansaco leve => risco muito provavel
ativa_regra46 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra46 = np.fmin(ativa_regra46, risco_muitoprovavel) #implicacao
if regra46.any() != 0:
regras_ativas.append(46)
#Regra 47: dismenorreia moderado; dispareunia intenso; dor costas/pernas leve; cansaco moderado => risco muito provavel
ativa_regra47 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra47 = np.fmin(ativa_regra47, risco_muitoprovavel) #implicacao
if regra47.any() != 0:
regras_ativas.append(47)
#Regra 48: dismenorreia moderado; dispareunia intenso; dor costas/pernas leve; cansaco intenso => risco muito provavel
ativa_regra48 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra48 = np.fmin(ativa_regra48, risco_muitoprovavel) #implicacao
if regra48.any() != 0:
regras_ativas.append(48)
#Regra 49: dismenorreia moderado; dispareunia intenso; dor costas/pernas moderado; cansaco leve => risco muito provavel
ativa_regra49 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra49 = np.fmin(ativa_regra49, risco_muitoprovavel) #implicacao
if regra49.any() != 0:
regras_ativas.append(49)
#Regra 50: dismenorreia moderado; dispareunia intenso; dor costas/pernas moderado; cansaco moderado => risco muito provavel
ativa_regra50 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra50 = np.fmin(ativa_regra50, risco_muitoprovavel) #implicacao
if regra50.any() != 0:
regras_ativas.append(50)
#Regra 51: dismenorreia moderado; dispareunia intenso; dor costas/pernas moderado; cansaco intenso => risco muito provavel
ativa_regra51 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra51 = np.fmin(ativa_regra51, risco_muitoprovavel) #implicacao
if regra51.any() != 0:
regras_ativas.append(51)
#Regra 52: dismenorreia moderado; dispareunia intenso; dor costas/pernas intenso; cansaco leve => risco muito provavel
ativa_regra52 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra52 = np.fmin(ativa_regra52, risco_muitoprovavel) #implicacao
if regra52.any() != 0:
regras_ativas.append(52)
#Regra 53: dismenorreia moderado; dispareunia intenso; dor costas/pernas intenso; cansaco moderado => risco muito provavel
ativa_regra53 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra53 = np.fmin(ativa_regra53, risco_muitoprovavel) #implicacao
if regra53.any() != 0:
regras_ativas.append(53)
#Regra 54: dismenorreia moderado; dispareunia intenso; dor costas/pernas intenso; cansaco intenso => risco muito provavel
ativa_regra54 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_moderada, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra54 = np.fmin(ativa_regra54, risco_muitoprovavel) #implicacao
if regra54.any() != 0:
regras_ativas.append(54)
#Regra 55: dismenorreia intenso; dispareunia leve; dor costas/pernas leve; cansaco leve => risco provavel
ativa_regra55 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra55 = np.fmin(ativa_regra55, risco_provavel) #implicacao
if regra55.any() != 0:
regras_ativas.append(55)
#Regra 56: dismenorreia intenso; dispareunia leve; dor costas/pernas leve; cansaco moderado => risco provavel
ativa_regra56 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra56 = np.fmin(ativa_regra56, risco_provavel) #implicacao
if regra56.any() != 0:
regras_ativas.append(56)
#Regra 57: dismenorreia intenso; dispareunia leve; dor costas/pernas leve; cansaco intenso => risco muito provavel
ativa_regra57 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra57 = np.fmin(ativa_regra57, risco_muitoprovavel) #implicacao
if regra57.any() != 0:
regras_ativas.append(57)
#Regra 58: dismenorreia intenso; dispareunia leve; dor costas/pernas moderado; cansaco leve => risco provavel
ativa_regra58 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra58 = np.fmin(ativa_regra58, risco_provavel) #implicacao
if regra58.any() != 0:
regras_ativas.append(58)
#Regra 59: dismenorreia intenso; dispareunia leve; dor costas/pernas moderado; cansaco moderado => risco provavel
ativa_regra59 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra59 = np.fmin(ativa_regra59, risco_provavel) #implicacao
if regra59.any() != 0:
regras_ativas.append(59)
#Regra 60: dismenorreia intenso; dispareunia leve; dor costas/pernas moderado; cansaco intenso => risco muito provavel
ativa_regra60 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra60 = np.fmin(ativa_regra60, risco_muitoprovavel) #implicacao
if regra60.any() != 0:
regras_ativas.append(60)
#Regra 61: dismenorreia intenso; dispareunia leve; dor costas/pernas intenso; cansaco leve => risco provavel
ativa_regra61 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra61 = np.fmin(ativa_regra61, risco_provavel) #implicacao
if regra61.any() != 0:
regras_ativas.append(61)
#Regra 62: dismenorreia intenso; dispareunia leve; dor costas/pernas intenso; cansaco moderado => risco provavel
ativa_regra62 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra62 = np.fmin(ativa_regra62, risco_provavel) #implicacao
if regra62.any() != 0:
regras_ativas.append(62)
#Regra 63: dismenorreia intenso; dispareunia leve; dor costas/pernas intenso; cansaco intenso => risco muito provavel
ativa_regra63 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_leve))) #composicao usando operador AND (minimo)
regra63 = np.fmin(ativa_regra63, risco_muitoprovavel) #implicacao
if regra63.any() != 0:
regras_ativas.append(63)
#Regra 64: dismenorreia intenso; dispareunia moderado; dor costas/pernas leve; cansaco leve => risco provavel
ativa_regra64 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra64 = np.fmin(ativa_regra64, risco_provavel) #implicacao
if regra64.any() != 0:
regras_ativas.append(64)
#Regra 65: dismenorreia intenso; dispareunia moderado; dor costas/pernas leve; cansaco moderado => risco provavel
ativa_regra65 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra65 = np.fmin(ativa_regra65, risco_provavel) #implicacao
if regra65.any() != 0:
regras_ativas.append(65)
#Regra 66: dismenorreia intenso; dispareunia moderado; dor costas/pernas leve; cansaco intenso => risco muito provavel
ativa_regra66 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra66 = np.fmin(ativa_regra66, risco_muitoprovavel) #implicacao
if regra66.any() != 0:
regras_ativas.append(66)
#Regra 67: dismenorreia intenso; dispareunia moderado; dor costas/pernas moderado; cansaco leve => risco provavel
ativa_regra67 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra67 = np.fmin(ativa_regra67, risco_provavel) #implicacao
if regra67.any() != 0:
regras_ativas.append(67)
#Regra 68: dismenorreia intenso; dispareunia moderado; dor costas/pernas moderado; cansaco moderado => risco muito provavel
ativa_regra68 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra68 = np.fmin(ativa_regra68, risco_muitoprovavel) #implicacao
if regra68.any() != 0:
regras_ativas.append(68)
#Regra 69: dismenorreia intenso; dispareunia moderado; dor costas/pernas moderado; cansaco intenso => risco muito provavel
ativa_regra69 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra69 = np.fmin(ativa_regra69, risco_muitoprovavel) #implicacao
if regra69.any() != 0:
regras_ativas.append(69)
#Regra 70: dismenorreia intenso; dispareunia moderado; dor costas/pernas intenso; cansaco leve => risco muito provavel
ativa_regra70 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra70 = np.fmin(ativa_regra70, risco_muitoprovavel) #implicacao
if regra70.any() != 0:
regras_ativas.append(70)
#Regra 71: dismenorreia intenso; dispareunia moderado; dor costas/pernas intenso; cansaco moderado => risco muito provavel
ativa_regra71 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra71 = np.fmin(ativa_regra71, risco_muitoprovavel) #implicacao
if regra71.any() != 0:
regras_ativas.append(71)
#Regra 72: dismenorreia intenso; dispareunia moderado; dor costas/pernas intenso; cansaco intenso => risco muito provavel
ativa_regra72 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_moderada))) #composicao usando operador AND (minimo)
regra72 = np.fmin(ativa_regra72, risco_muitoprovavel) #implicacao
if regra72.any() != 0:
regras_ativas.append(72)
#Regra 73: dismenorreia intenso; dispareunia intenso; dor costas/pernas leve; cansaco leve => risco muito provavel
ativa_regra73 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra73 = np.fmin(ativa_regra73, risco_muitoprovavel) #implicacao
if regra73.any() != 0:
regras_ativas.append(73)
#Regra 74: dismenorreia intenso; dispareunia intenso; dor costas/pernas leve; cansaco moderado => risco muito provavel
ativa_regra74 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra74 = np.fmin(ativa_regra74, risco_muitoprovavel) #implicacao
if regra74.any() != 0:
regras_ativas.append(74)
#Regra 75: dismenorreia intenso; dispareunia intenso; dor costas/pernas leve; cansaco intenso => risco muito provavel
ativa_regra75 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_leve, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra75 = np.fmin(ativa_regra75, risco_muitoprovavel) #implicacao
if regra75.any() != 0:
regras_ativas.append(75)
#Regra 76: dismenorreia intenso; dispareunia intenso; dor costas/pernas moderado; cansaco leve => risco muito provavel
ativa_regra76 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra76 = np.fmin(ativa_regra76, risco_muitoprovavel) #implicacao
if regra76.any() != 0:
regras_ativas.append(76)
#Regra 77: dismenorreia intenso; dispareunia intenso; dor costas/pernas moderado; cansaco moderado => risco muito provavel
ativa_regra77 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra77 = np.fmin(ativa_regra77, risco_muitoprovavel) #implicacao
if regra77.any() != 0:
regras_ativas.append(77)
#Regra 78: dismenorreia intenso; dispareunia intenso; dor costas/pernas moderado; cansaco intenso => risco muito provavel
ativa_regra78 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_moderada, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra78 = np.fmin(ativa_regra78, risco_muitoprovavel) #implicacao
if regra78.any() != 0:
regras_ativas.append(78)
#Regra 79: dismenorreia intenso; dispareunia intenso; dor costas/pernas intenso; cansaco leve => risco muito provavel
ativa_regra79 = np.fmin(cansaco_nivel_leve, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra79 = np.fmin(ativa_regra79, risco_muitoprovavel) #implicacao
if regra79.any() != 0:
regras_ativas.append(79)
#Regra 80: dismenorreia intenso; dispareunia intenso; dor costas/pernas intenso; cansaco moderado => risco muito provavel
ativa_regra80 = np.fmin(cansaco_nivel_moderado, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra80 = np.fmin(ativa_regra80, risco_muitoprovavel) #implicacao
if regra80.any() != 0:
regras_ativas.append(80)
#Regra 81: dismenorreia intenso; dispareunia intenso; dor costas/pernas intenso; cansaco intenso => risco muito provavel
ativa_regra81 = np.fmin(cansaco_nivel_intenso, np.fmin(dorCP_nivel_intensa, np.fmin(dismenorreia_nivel_intensa, dispareunia_nivel_intensa))) #composicao usando operador AND (minimo)
regra81 = np.fmin(ativa_regra81, risco_muitoprovavel) #implicacao
if regra81.any() != 0:
regras_ativas.append(81)
print("regras ativas: "+str(regras_ativas))
## Agregacao das regras
agregacao = np.fmax.reduce([
    regra1, regra2, regra3, regra4, regra5, regra6, regra7, regra8, regra9,
    regra10, regra11, regra12, regra13, regra14, regra15, regra16, regra17, regra18,
    regra19, regra20, regra21, regra22, regra23, regra24, regra25, regra26, regra27,
    regra28, regra29, regra30, regra31, regra32, regra33, regra34, regra35, regra36,
    regra37, regra38, regra39, regra40, regra41, regra42, regra43, regra44, regra45,
    regra46, regra47, regra48, regra49, regra50, regra51, regra52, regra53, regra54,
    regra55, regra56, regra57, regra58, regra59, regra60, regra61, regra62, regra63,
    regra64, regra65, regra66, regra67, regra68, regra69, regra70, regra71, regra72,
    regra73, regra74, regra75, regra76, regra77, regra78, regra79, regra80, regra81,
]) #agregacao das regras (maximo elemento a elemento sobre todas as regras)
risco0 = np.zeros_like(risco) #variavel auxiliar para montar o grafico
## Calculo do resultado defuzzificado
risco_def = fuzz.defuzz(risco, agregacao, 'centroid') #defuzzificacao pelo metodo centroide
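# For reference: 'centroid' returns (up to skfuzzy's numerical integration
# scheme) the centre of gravity of the aggregated membership function,
# i.e. approximately sum(agregacao * risco) / sum(agregacao).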
risco_ativacao = fuzz.interp_membership(risco, agregacao, risco_def) #intersecao do risco defuzzificado com a funcao de pertinencia
## Grafico da funcao de pertinencia resultante
fig, ax0 = plt.subplots(figsize=(9.27,3.23))
ax0.plot(risco, risco_improvavel, 'b', linewidth=0.5, label='I', linestyle='--')
ax0.plot(risco, risco_poucoprovavel, 'g', linewidth=0.5, label='PP', linestyle='--')
ax0.plot(risco, risco_provavel, 'y', linewidth=0.5, label='P', linestyle='--')
ax0.plot(risco, risco_muitoprovavel, 'r', linewidth=1.5, label='MP', linestyle='--')
ax0.legend(loc='upper center',bbox_to_anchor=(0.5, 1.05), ncol=4, fancybox=True, shadow=True)
ax0.fill_between(risco, risco0, agregacao, facecolor='Orange', alpha=0.7)
ax0.plot([risco_def, risco_def], [0, risco_ativacao], 'k', linewidth=1.5, alpha=0.9)
plt.xticks(np.append(plt.xticks()[0],risco_def))
plt.xlabel('risco (%)')
ax0.set_title("Agregacao das regras e resultado defuzzificado")
plt.tight_layout()
#plt.show()
plt.savefig('/home/endometriose/mysite/static/resultado_peta.png')
return risco_def
mamdani_defuzz(10,10,10,10) | gpl-3.0 |
icdishb/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
theoryno3/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
robotics-at-maryland/qubo | src/vision/src/tuners/DQNplayground/image_loader.py | 1 | 3345 | #!/usr/bin/env python
#CV2 format B G R [H W C]
#Torch format [C H W]
#http://pytorch.org/tutorials/beginner/data_loading_tutorial.html
from __future__ import print_function, division
import sys
if (sys.version_info > (3, 0) ):
import queue
else:
import Queue as queue
import os
import torch
#import pandas as pd
#from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
#from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
#plt.ion() # interactive mode
import cv2
#import argparse
import threading
import time
# https://pymotw.com/3/queue/
class CV2videoloader(threading.Thread):
def __init__(self,name,videosrc,video_buffer,pulse=0.5,camera_config=None):
self.videosrc= videosrc if videosrc != None else 0
self.camera = cv2.VideoCapture(self.videosrc)
self.buffer = video_buffer if isinstance( video_buffer , queue.Queue) else None
self.stop = False
self.pulse = pulse
self.debug = False
threading.Thread.__init__(self)
def run(self):
while not self.stop:
time.sleep(self.pulse)
if self.debug: print("----get one----")
(grabbed, frame) = self.camera.read()
if grabbed == False:
warnings.warn("the camera {} is grabbing air (no frame was returned)".format(self.videosrc))
else:
self.buffer.put(frame)
class torchStream():
def __init__(self,name,video_buffer,batchsize=1,transform=None):
self.name =name
self.video_buffer= video_buffer if isinstance( video_buffer , queue.Queue) else None
if transform!= None:
self.transform = transform
else:
self.transform=transforms.ToTensor()
warnings.warn("at least do a ToTensor operation to the raw image")
self.batchsize = batchsize
self.batchbuffer = list()
def __len__(self):
return self.video_buffer.qsize()
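    # queue.Queue.get() blocks by default, so batchpop() below waits until
    # `batchsize` frames have been captured, transforms each one, and stacks
    # them along a new leading batch dimension with torch.stack.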
def batchpop(self):
self.batchbuffer = [ self.transform( self.video_buffer.get() ) for i in range(self.batchsize) ]
return torch.stack( self.batchbuffer,0 )
def pop(self):
return self.transform( self.video_buffer.get() )
class tfstream():
def __init__(self,name,video_buffer):
pass
def __call__(self):
return 0
def stop():
return 0
def isstop():
return 0
if __name__ == '__main__':
data_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.Scale(256),
transforms.ToTensor(),
])
def show_stream_batch(sample_batched):
images_batch = sample_batched
batch_size = len(images_batch)
grid = utils.make_grid(images_batch)
t=transforms.ToPILImage()
plt.imshow( t(grid) )
video_buffer = queue.Queue( maxsize = 30 )
CV2L= CV2videoloader( name='test',
videosrc=0,video_buffer=video_buffer,
camera_config=None )
torS = torchStream(name='test',video_buffer=video_buffer,batchsize=5,transform=data_transform )
CV2L.start()
time.sleep(3)
show_stream_batch( torS.batchpop() )
CV2L.stop = True
CV2L.join()
plt.show()
| mit |
hoechenberger/psychopy | psychopy/demos/builder/practical IAT/scoreIAT.py | 1 | 7371 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Scoring script for MSU-PsychoPy version of IAT task.
Authors: Jeremy R. Gray & Nate Pasmanter, 2013
"""
from __future__ import division
from __future__ import print_function
from builtins import range
import pandas as pd
import glob, os, sys
def scoreIAT(csvfile, write_file=False):
"""Input = csv file; output = D score (or explanation why data are bad).
Expects column headers of the form Response_#.corr, and Response_#.rt,
where # is 1-7, for IAT block number.
Scoring is mostly per GNB 2003:
Greenwald, A. G., Nosek, B. A., & Banaji, M. R. (2003). Understanding and
using the implicit association test: I. An improved scoring algorithm.
Journal of Personality and Social Psychology, 85, 197-216.
Following Amodio, incorrect responses do not contribute to RT; there's no
RT penalty for this reason. For computing SDs, unbiased SD is used (N-1).
If write_file=True, will save score to a file 'Scored_' + csvfile.
A positive D value from this script indicates a bias in favor of
"creative bad / practical good". I.e., if RT in blocks with creative+good is
longer than RT in blocks with creative+bad, people are more conflicted or
hesitant about creative+good.
The way the task is set up, when side == 1, creative and bad are paired first.
If side == -1, the opposite is true. This pairing is handled by the scoring
script.
"""
# ------------ Thresholds for excluding trials or subjects: ------------
rt_FAST = 0.300
rt_FASTms = int(1000 * rt_FAST) # 300ms
rt_SLOW = 10.
correct = 1
incorrect = 0
# GNB 2003 thresholds for why subject should be excluded:
warn = u''
threshold = {'ac_prac_blk': 0.50,
'ac_prac_all': 0.60, 'rt_prac_all': 0.35,
'ac_task_blk': 0.60, 'rt_task_blk': 0.25,
'ac_task_all': 0.70, 'rt_task_all': 0.10 }
# ------------ Read dataframe (df) from .csv file and parse: ------------
df = pd.read_csv(csvfile)
# accuracy; mean --> proportion correct
prac_ac = [df.loc[:, 'Response_1.corr'].dropna(),
df.loc[:, 'Response_2.corr'].dropna(),
df.loc[:, 'Response_5.corr'].dropna()]
task_ac = [df.loc[:, 'Response_3.corr'].dropna(),
df.loc[:, 'Response_4.corr'].dropna(),
df.loc[:, 'Response_6.corr'].dropna(),
df.loc[:, 'Response_7.corr'].dropna()]
# response time in seconds
prac_rt = [df.loc[:, 'Response_1.rt'].dropna(),
df.loc[:, 'Response_2.rt'].dropna(),
df.loc[:, 'Response_5.rt'].dropna()]
task_rt = [df.loc[:, 'Response_3.rt'].dropna(),
df.loc[:, 'Response_4.rt'].dropna(),
df.loc[:, 'Response_6.rt'].dropna(),
df.loc[:, 'Response_7.rt'].dropna()]
assert len(task_ac[0]) == len(task_ac[2]) == len(task_rt[0]) # block 3, 6
assert len(task_ac[1]) == len(task_ac[3]) == len(task_rt[1]) # block 4, 7
assert len(task_rt[0]) == len(task_rt[2]) > 1 # require 2+ items in 3, 6
    assert len(task_rt[1]) == len(task_rt[3]) > 1 # require 2+ items in 4, 7
assert all([all(task_ac[i].isin([correct, incorrect])) for i in range(4)])
assert all([all(task_rt[i] > 0) for i in range(4)]) # require positive RTs
# counterbalanced IAT screen side: +1 or -1; used in calc of D
side = df.loc[0, 'side']
assert side in [-1, 1]
# ------------ Check participant exclusion thresholds ------------
# check proportion-too-fast in each task block:
for i, rt in enumerate(task_rt):
prop_too_fast = len(rt[(rt < rt_FAST)]) / len(rt)
if prop_too_fast > threshold['rt_task_blk']:
pct = 100 * prop_too_fast
warn += "%.0f%% trials with RT < %dms in task block #%d\n" % (
pct, rt_FASTms, (3, 4, 6, 7)[i])
# check proportion-too-fast all task trials:
rt = task_rt[0].append(task_rt[1]).append(task_rt[2]).append(task_rt[3])
prop_too_fast = len(rt[(rt < rt_FAST)]) / len(rt)
if prop_too_fast > threshold['rt_task_all']:
pct = 100 * prop_too_fast
warn += "%.0f%% trials with RT < %dms across all task blocks\n" % (
pct, rt_FASTms)
# check proportion-too-fast in each practice block:
for i, rt in enumerate(prac_rt):
prop_too_fast = len(rt[(rt < rt_FAST)]) / len(rt)
if prop_too_fast > threshold['rt_prac_all']:
pct = 100 * prop_too_fast
warn += "%.0f%% trials with RT < %dms in practice block #%d\n" % (
pct, rt_FASTms, (1, 2, 5)[i])
# check proportion-error in each practice block:
for i, prac_blk in enumerate(prac_ac):
if prac_blk.mean() < threshold['ac_prac_blk']:
pct = 100 * (1 - prac_blk.mean())
warn += "%.0f%% errors in practice block #%d\n" %(pct, (1, 2, 5)[i])
# check proportion-error in all practice trials:
ac = prac_ac[0].append(prac_ac[1]).append(prac_ac[2]).mean()
if ac < threshold['ac_prac_all']:
        pct = 100 * (1 - ac)
warn += "%.0f%% errors across all practice blocks\n" % pct
# check proportion-error in task blocks:
for i, ac in enumerate(task_ac):
if ac.mean() < threshold['ac_task_blk']:
pct = 100 * (1 - ac.mean())
warn += "%.0f%% errors in task block #%d\n" % (pct, (3, 4, 6, 7)[i])
# check proportion-error across all task trials:
ac = task_ac[0].append(task_ac[1]).append(task_ac[2]).append(task_ac[3])
if ac.mean() < threshold['ac_task_all']:
pct = 100 * (1 - ac.mean())
warn += "%.0f%% errors across all task trials\n" % pct
# ------------ Filter out bad trials: ------------
for i, block in enumerate(task_ac):
# retain trials with correct responses:
correct_trials = (block == correct)
task_rt[i] = task_rt[i][correct_trials]
#task_ac[i] = task_ac[i][correct_trials]
for i, block in enumerate(task_rt):
# retain trials where RT is not too fast or too slow:
rt_ok_trials = (block >= rt_FAST) & (block <= rt_SLOW)
task_rt[i] = task_rt[i][rt_ok_trials]
#task_ac[i] = task_ac[i][rt_ok_trials]
# ------------ Calculate summary stats of the filtered data: ----------
mean3, mean4, mean6, mean7 = [a.mean() for a in task_rt]
stdev36 = task_rt[0].append(task_rt[2]).std() # pooled std of blocks 3 & 6
stdev47 = task_rt[1].append(task_rt[3]).std() # pooled std of blocks 4 & 7
d36 = side * (mean6 - mean3) / stdev36 # side is +1 or -1
d47 = side * (mean7 - mean4) / stdev47
D_IAT = (d36 + d47) / 2
stats = D_IAT, side, mean3, mean4, mean6, mean7, stdev36, stdev47, warn.strip() or 'None'
labels = 'D_IAT', 'side', 'mean3', 'mean4', 'mean6', 'mean7', 'sd36', 'sd47', 'warnings'
if write_file:
df = pd.DataFrame([stats], columns=labels)
df.to_csv('Scored_' + csvfile, index=False, index_label=False, encoding='utf-8')
return warn.strip() or D_IAT
def batchScoreIAT(path='.', write_file=False):
"""Call scoreIAT() on all csv files in path
"""
files = glob.glob(os.path.join(path, '*.csv'))
for f in files:
scoreIAT(f, write_file=write_file)
if __name__ == '__main__':
for f in sys.argv[1:]:
print((f, scoreIAT(f)))
| gpl-3.0 |
aldian/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 9 | 6700 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testFeatureEngineeringFnWithSameName(self):
def input_fn():
return {
"x": constant_op.constant(["9."])
}, {
"y": constant_op.constant(["99."])
}
def feature_engineering_fn(features, labels):
# Github #12205: raise a TypeError if called twice.
_ = string_ops.string_split(features["x"])
features["x"] = constant_op.constant([9.])
labels["y"] = constant_op.constant([99.])
return features, labels
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
jorik041/scikit-learn | examples/manifold/plot_lle_digits.py | 181 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
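    # The embedding is min-max scaled to the unit square so that methods with
    # very different output scales share the same axes; each sample is drawn
    # as its digit label, coloured by class, and (where AnnotationBbox is
    # available) thumbnails of well-separated digits are overlaid.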
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
Fab7c4/paparazzi | sw/tools/calibration/calibration_utils.py | 19 | 9087 |
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function, division
import re
import numpy as np
from numpy import sin, cos
from scipy import linalg, stats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_ids_in_log(filename):
"""Returns available ac_id from a log."""
f = open(filename, 'r')
ids = []
pattern = re.compile("\S+ (\S+)")
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
ac_id = m.group(1)
if not ac_id in ids:
ids.append(ac_id)
return ids
def read_log(ac_id, filename, sensor):
"""Extracts raw sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_RAW (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_mag_current(ac_id, filename):
"""Extracts raw magnetometer and current measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_MAG_CURRENT_CALIBRATION (\S+) (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5))])
return np.array(list_meas)
def filter_meas(meas, window_size, noise_threshold):
"""Select only non-noisy data."""
filtered_meas = []
filtered_idx = []
for i in range(window_size, len(meas)-window_size):
noise = meas[i-window_size:i+window_size, :].std(axis=0)
if linalg.norm(noise) < noise_threshold:
filtered_meas.append(meas[i, :])
filtered_idx.append(i)
return np.array(filtered_meas), filtered_idx
def get_min_max_guess(meas, scale):
"""Initial boundary based calibration."""
max_meas = meas[:, :].max(axis=0)
min_meas = meas[:, :].min(axis=0)
n = (max_meas + min_meas) / 2
sf = 2*scale/(max_meas - min_meas)
return np.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]])
def scale_measurements(meas, p):
"""Scale the set of measurements."""
l_comp = []
l_norm = []
for m in meas[:, ]:
sm = (m - p[0:3])*p[3:6]
l_comp.append(sm)
l_norm.append(linalg.norm(sm))
return np.array(l_comp), np.array(l_norm)
def estimate_mag_current_relation(meas):
"""Calculate linear coefficient of magnetometer-current relation."""
coefficient = []
for i in range(0, 3):
gradient, intercept, r_value, p_value, std_err = stats.linregress(meas[:, 3], meas[:, i])
coefficient.append(gradient)
return coefficient
def print_xml(p, sensor, res):
"""Print xml for airframe file."""
print("")
print("<define name=\""+sensor+"_X_NEUTRAL\" value=\""+str(int(round(p[0])))+"\"/>")
print("<define name=\""+sensor+"_Y_NEUTRAL\" value=\""+str(int(round(p[1])))+"\"/>")
print("<define name=\""+sensor+"_Z_NEUTRAL\" value=\""+str(int(round(p[2])))+"\"/>")
print("<define name=\""+sensor+"_X_SENS\" value=\""+str(p[3]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Y_SENS\" value=\""+str(p[4]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Z_SENS\" value=\""+str(p[5]*2**res)+"\" integer=\"16\"/>")
def plot_results(block, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref):
"""Plot calibration results."""
plt.subplot(3, 1, 1)
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.plot(flt_idx, flt_meas[:, 0], 'ro')
plt.plot(flt_idx, flt_meas[:, 1], 'ro')
plt.plot(flt_idx, flt_meas[:, 2], 'ro')
plt.xlabel('time (s)')
plt.ylabel('ADC')
plt.title('Raw sensors')
plt.subplot(3, 2, 3)
plt.plot(cp0[:, 0])
plt.plot(cp0[:, 1])
plt.plot(cp0[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.subplot(3, 2, 4)
plt.plot(np0)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.subplot(3, 2, 5)
plt.plot(cp1[:, 0])
plt.plot(cp1[:, 1])
plt.plot(cp1[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.subplot(3, 2, 6)
plt.plot(np1)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
    # if we want to have another plot we only draw the figure (non-blocking);
    # note that matplotlib versions before 1.0.0 allow only a single call to show()
if block:
plt.show()
else:
plt.draw()
def plot_mag_3d(measured, calibrated, p):
"""Plot magnetometer measurements on 3D sphere."""
# set up points for sphere and ellipsoid wireframes
u = np.r_[0:2 * np.pi:20j]
v = np.r_[0:np.pi:20j]
wx = np.outer(cos(u), sin(v))
wy = np.outer(sin(u), sin(v))
wz = np.outer(np.ones(np.size(u)), cos(v))
ex = p[0] * np.ones(np.size(u)) + np.outer(cos(u), sin(v)) / p[3]
ey = p[1] * np.ones(np.size(u)) + np.outer(sin(u), sin(v)) / p[4]
ez = p[2] * np.ones(np.size(u)) + np.outer(np.ones(np.size(u)), cos(v)) / p[5]
# measurements
mx = measured[:, 0]
my = measured[:, 1]
mz = measured[:, 2]
# calibrated values
cx = calibrated[:, 0]
cy = calibrated[:, 1]
cz = calibrated[:, 2]
# axes size
left = 0.02
bottom = 0.05
width = 0.46
height = 0.9
rect_l = [left, bottom, width, height]
rect_r = [left/2+0.5, bottom, width, height]
fig = plt.figure(figsize=plt.figaspect(0.5))
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_l)
else:
ax = fig.add_subplot(1, 2, 1, position=rect_l, projection='3d')
# plot measurements
ax.scatter(mx, my, mz)
plt.hold(True)
# plot line from center to ellipsoid center
ax.plot([0.0, p[0]], [0.0, p[1]], [0.0, p[2]], color='black', marker='+', markersize=10)
# plot ellipsoid
ax.plot_wireframe(ex, ey, ez, color='grey', alpha=0.5)
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([mx.max() - mx.min(), my.max() - my.min(), mz.max() - mz.min()]).max()
Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (mx.max() + mx.min())
Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (my.max() + my.min())
Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (mz.max() + mz.min())
# add the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
ax.set_title('MAG raw with fitted ellipsoid and center offset')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_r)
else:
ax = fig.add_subplot(1, 2, 2, position=rect_r, projection='3d')
ax.plot_wireframe(wx, wy, wz, color='grey', alpha=0.5)
plt.hold(True)
ax.scatter(cx, cy, cz)
ax.set_title('MAG calibrated on unit sphere')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
plt.show()
def read_turntable_log(ac_id, tt_id, filename, _min, _max):
""" Read a turntable log.
    Return an array whose first column is the turntable and the next 3 are gyro.
"""
f = open(filename, 'r')
pattern_g = re.compile("(\S+) "+str(ac_id)+" IMU_GYRO_RAW (\S+) (\S+) (\S+)")
pattern_t = re.compile("(\S+) "+str(tt_id)+" IMU_TURNTABLE (\S+)")
last_tt = None
list_tt = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern_t, line)
if m:
last_tt = float(m.group(2))
m = re.match(pattern_g, line)
if m and last_tt and _min < last_tt < _max:
list_tt.append([last_tt, float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_tt)
| gpl-2.0 |
jkarnows/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
hrjn/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 51 | 12300 |
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0] + 1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_array_equal(X, Xt)
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_equal(X.data, Xt.data)
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_equal(X.data, Xt.data)
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
petebachant/seaborn | setup.py | 22 | 3623 | #! /usr/bin/env python
#
# Copyright (C) 2012-2014 Michael Waskom <[email protected]>
import os
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
DESCRIPTION = "Seaborn: statistical data visualization"
LONG_DESCRIPTION = """\
Seaborn is a library for making attractive and informative statistical graphics in Python. It is built on top of matplotlib and tightly integrated with the PyData stack, including support for numpy and pandas data structures and statistical routines from scipy and statsmodels.
Some of the features that seaborn offers are
- Several built-in themes that improve on the default matplotlib aesthetics
- Tools for choosing color palettes to make beautiful plots that reveal patterns in your data
- Functions for visualizing univariate and bivariate distributions or for comparing them between subsets of data
- Tools that fit and visualize linear regression models for different kinds of independent and dependent variables
- Functions that visualize matrices of data and use clustering algorithms to discover structure in those matrices
- A function to plot statistical timeseries data with flexible estimation and representation of uncertainty around the estimate
- High-level abstractions for structuring grids of plots that let you easily build complex visualizations
"""
DISTNAME = 'seaborn'
MAINTAINER = 'Michael Waskom'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://stanford.edu/~mwaskom/software/seaborn/'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/mwaskom/seaborn/'
VERSION = '0.7.0.dev'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
def check_dependencies():
install_requires = []
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
# (help on that would be awesome)
try:
import numpy
except ImportError:
install_requires.append('numpy')
try:
import scipy
except ImportError:
install_requires.append('scipy')
try:
import matplotlib
except ImportError:
install_requires.append('matplotlib')
try:
import pandas
except ImportError:
install_requires.append('pandas')
return install_requires
if __name__ == "__main__":
install_requires = check_dependencies()
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['seaborn', 'seaborn.external', 'seaborn.tests'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Multimedia :: Graphics',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
)
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/core/internals.py | 9 | 166419 | import copy
import itertools
import re
import operator
from datetime import datetime, timedelta, date
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_slice, is_dtype_equal,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_datetimetz, is_sparse,
array_equivalent, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric,
is_numeric_v_string_like, is_internal_type)
from pandas.core.dtypes import DatetimeTZDtype
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
from pandas.tseries.index import DatetimeIndex
import pandas.core.common as com
import pandas.core.missing as mis
import pandas.core.convert as convert
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_datetimetz = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_box_to_block_values = True
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def external_values(self, dtype=None):
""" return an outside world format, currently just the ndarray """
return self.values
def internal_values(self, dtype=None):
""" return an internal format, currently just the ndarray
this should be the pure internal API format """
return self.values
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
        this is often overridden to handle to_dense-like operations
"""
return self.values
def to_dense(self):
return self.values.view()
def to_object_block(self, mgr):
""" return myself as an object block """
values = self.get_values(dtype=object)
return self.make_block(values,klass=ObjectBlock)
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block(self, values, placement=None, ndim=None, **kwargs):
"""
Create a new block, with type inference
        propagate any values that are not specified
"""
if placement is None:
placement = self.mgr_locs
if ndim is None:
ndim = self.ndim
return make_block(values, placement=placement, ndim=ndim, **kwargs)
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items, mgr=None):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return self.make_block(new_values,
fastpath=True)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, mgr=None, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = self.make_block(values=_block_shape(result))
return result
def fillna(self, value, limit=None, inplace=False, downcast=None, mgr=None):
""" fillna on the block with the value. If we fail, then convert to ObjectBlock
and try again """
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
original_value = value
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
# fillna, but if we cannot coerce, then try again as an ObjectBlock
try:
values, _, value, _ = self._try_coerce_args(self.values, value)
blocks = self.putmask(mask, value, inplace=inplace)
blocks = [ b.make_block(values=self._try_coerce_result(b.values)) for b in blocks ]
return self._maybe_downcast(blocks, downcast)
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
# we cannot coerce the underlying object, so
# make an ObjectBlock
return self.to_object_block(mgr=mgr).fillna(original_value,
limit=limit,
inplace=inplace,
downcast=False)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
return _extend_blocks([ b.downcast(downcast) for b in blocks ])
def downcast(self, dtypes=None, mgr=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return self.make_block(nv,
fastpath=True)
# ndim > 1
if dtypes is None:
return self
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.get(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(self.make_block(nv,
fastpath=True,
placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, mgr=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return self.make_block(Categorical(self.values, **kwargs))
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
if issubclass(dtype.type, (compat.text_type, compat.string_types)):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.values
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(values.ravel(), dtype, copy=True)
values = values.reshape(self.shape)
newb = make_block(values,
placement=self.mgr_locs,
dtype=dtype,
klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return self.copy() if copy else self
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have roundtripped through object in the meantime """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, False, other, False
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='nan', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
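# Illustrative note (not part of the original source): with quoting disabled
# the values are stringified wholesale; otherwise they become an object array
# and only the missing slots are overwritten, e.g. (hypothetical data):
#
#     >>> vals = np.array([[1.0, np.nan]], dtype=object)
#     >>> vals[isnull(vals)] = 'nan'        # na_rep
#     >>> # -> array([[1.0, 'nan']], dtype=object)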
# block actions ####
def copy(self, deep=True, mgr=None):
values = self.values
if deep:
values = values.copy()
return self.make_block(values,
klass=self.__class__,
fastpath=True)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
original_to_replace = to_replace
# try to replace, if we raise an error, convert to ObjectBlock and retry
try:
values, _, to_replace, _ = self._try_coerce_args(self.values, to_replace)
mask = com.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
blocks = self.putmask(mask, value, inplace=inplace)
if convert:
blocks = [ b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks ]
return blocks
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
return self.to_object_block(mgr=mgr).replace(to_replace=original_to_replace,
value=value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert)
def _replace_single(self, *args, **kwargs):
""" no-op on a non-ObjectBlock """
return self if kwargs['inplace'] else self.copy()
def setitem(self, indexer, value, mgr=None):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, _, value, _ = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = self.make_block(transf(values),
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
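# Illustrative note (not part of the original source): when the assigned value
# cannot be held by the current dtype, the block values are upcast before the
# set, roughly (hypothetical data):
#
#     >>> vals = np.array([1, 2, 3])                     # an int64 block
#     >>> dtype, _ = com._maybe_promote(np.array(np.nan).dtype)
#     >>> vals = vals.astype(dtype)                      # now float64
#     >>> vals[0] = np.nan
#
# which is why setting NaN into an integer column produces a float block.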
def putmask(self, mask, new, align=True, inplace=False,
axis=0, transpose=False, mgr=None):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on new/mask, default is True
inplace : perform inplace modification, default is False
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a list of new blocks, the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
if hasattr(new, 'reindex_axis'):
new = new.values
if hasattr(mask, 'reindex_axis'):
mask = mask.values
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
if transpose:
new_values = new_values.T
new = self._try_cast(new)
# If the default repeat behavior in np.putmask would go in the wrong
# direction, then explicitly repeat and reshape new instead
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim and axis == 1:
new = np.repeat(new, new_values.shape[-1]).reshape(
self.shape)
new = new.astype(new_values.dtype)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
if transpose:
mask = mask.T
if isinstance(new, np.ndarray):
new = new.T
axis = new_values.ndim - axis - 1
# Pseudo-broadcast
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim:
new_shape = list(new.shape)
new_shape.insert(axis, 1)
new = new.reshape(tuple(new_shape))
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
if isinstance(new, np.ndarray):
n = np.squeeze(new[i % new.shape[0]])
else:
n = np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = self.make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(self.make_block(values=nv,
fastpath=True))
return new_blocks
if inplace:
return [self]
if transpose:
new_values = new_values.T
return [self.make_block(new_values,
fastpath=True)]
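# Illustrative note (not part of the original source): on the upcast path the
# work is delegated column by column to _putmask_smart, roughly (hypothetical
# data):
#
#     >>> m = np.array([True, False, True])
#     >>> _putmask_smart(np.array([1, 2, 3]), m, np.array(['a', 'b', 'c']))
#     >>> # -> array(['a', 2, 'c'], dtype=object)
#
# so only the columns actually touched by the mask change dtype.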
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
limit_direction='forward',
fill_value=None, coerce=False, downcast=None, mgr=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = mis._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast,
mgr=mgr)
# try an interp method
try:
m = mis._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
limit_direction=limit_direction,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
mgr=mgr,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None, mgr=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
values = self._try_operate(values)
values = mis.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [self.make_block(values,
klass=self.__class__,
fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
limit_direction='forward',
inplace=False, downcast=None, mgr=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to mis.interpolate_1d
return mis.interpolate_1d(index, x, method=method, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [self.make_block(interp_values,
klass=self.__class__,
fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
# com.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
# so need to preserve types
# sparse is treated like an ndarray, but needs .get_values() shaping
values = self.values
if self.is_sparse:
values = self.get_values()
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(values, indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(values, indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if not is_dtype_equal(new_values.dtype, self.dtype):
return self.make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def diff(self, n, axis=1, mgr=None):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values,
fastpath=True)]
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [self.make_block(new_values,
fastpath=True)]
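# Illustrative note (not part of the original source): the shift is an np.roll
# plus masking the wrapped-around slots with the fill value, roughly
# (hypothetical data):
#
#     >>> vals = np.array([1., 2., 3., 4.])
#     >>> out = np.roll(vals, 2)                # [3., 4., 1., 2.]
#     >>> out[:2] = np.nan                      # fill the leading slots
#     >>> # -> array([nan, nan, 1., 2.])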
def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True (the default), raise when I can't perform the
function; if False, just return the data that we had coming in
try_cast : try casting the results to the input type
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
# this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, values_mask, other, other_mask = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
# avoid numpy warning of comparisons against None
if other is None:
result = not func.__name__ == 'eq'
# avoid numpy warning of elementwise comparisons to object
elif is_numeric_v_string_like(values, other):
result = False
else:
result = func(values, other)
# mask if needed
if isinstance(values_mask, np.ndarray) and values_mask.any():
result = result.astype('float64',copy=False)
result[values_mask] = np.nan
if other_mask is True:
result = result.astype('float64',copy=False)
result[:] = np.nan
elif isinstance(other_mask, np.ndarray) and other_mask.any():
result = result.astype('float64',copy=False)
result[other_mask.ravel()] = np.nan
return self._try_coerce_result(result)
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [self.make_block(result,
fastpath=True,)]
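# Illustrative note (not part of the original source): arithmetic and
# comparison operators funnel through eval, conceptually
#
#     >>> # block.eval(operator.gt, 3) computes values > 3 elementwise,
#     >>> # writes np.nan wherever the coercion masks flagged missing data,
#     >>> # and wraps the result in a new block.
#
# The transposition logic only lines a frame-shaped `other` up with the
# block-internal (items x values) layout.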
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False, axis=0, transpose=False, mgr=None):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True (the default), raise when I can't perform the
function; if False, just return the data that we had coming in
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a new block(s), the result of the func
"""
values = self.values
if transpose:
values = values.T
if hasattr(other, 'reindex_axis'):
other = other.values
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# If the default broadcasting would go in the wrong direction, then
# explicitly reshape other instead
if getattr(other, 'ndim', 0) >= 1:
if values.ndim - 1 == other.ndim and axis == 1:
other = other.reshape(tuple(other.shape + (1,)))
if not hasattr(cond, 'shape'):
raise ValueError("where must have a condition that is ndarray like")
other = _maybe_convert_string_to_object(other)
# our where function
def func(cond, values, other):
if cond.ravel().all():
return values
values, values_mask, other, other_mask = self._try_coerce_args(values, other)
try:
return self._try_coerce_result(
expressions.where(cond, values, other, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if transpose:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return self.make_block(result)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(self.make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
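# Illustrative note (not part of the original source): for a block that can
# hold NaN, where() is essentially expressions.where(cond, values, other)
# (an np.where-style selection) wrapped back into a block; blocks that cannot
# hold NaN (int/bool) are split so rows whose condition is entirely True keep
# their dtype while the remaining rows are upcast in a separate block.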
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False, **kwargs):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
@property
def shape(self):
if self.ndim == 1:
return (len(self.values)),
return (len(self.mgr_locs), len(self.values))
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def putmask(self, mask, new, align=True, inplace=False,
axis=0, transpose=False, mgr=None):
"""
putmask the data to the block; we must be a single block and not generate
other blocks
return the resulting block
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on new/mask, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values, _, new, _ = self._try_coerce_args(new_values, new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
new = new[mask]
new_values[mask] = new
new_values = self._try_coerce_result(new_values)
return [self.make_block(values=new_values)]
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
formatter = None
if float_format and decimal != '.':
formatter = lambda v : (float_format % v).replace('.',decimal,1)
elif decimal != '.':
formatter = lambda v : ('%g' % v).replace('.',decimal,1)
elif float_format:
formatter = lambda v : float_format % v
if formatter is None and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatter:
imask = (~mask).ravel()
values.flat[imask] = np.array(
[formatter(val) for val in values.ravel()[imask]])
return values
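# Illustrative note (not part of the original source): the formatter only
# applies when float_format or a non-'.' decimal separator is requested, e.g.
#
#     >>> fmt = lambda v: ('%.2f' % v).replace('.', ',', 1)
#     >>> fmt(3.14159)
#     >>> # -> '3,14'
#
# mirroring the float_format/decimal branch above; missing values are written
# as na_rep before formatting.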
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (isinstance(element, (float, int, complex, np.float_, np.int_)) and
not isinstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def fillna(self, value, **kwargs):
# allow filling with integers to be
# interpreted as seconds
if not isinstance(value, np.timedelta64) and com.is_integer(value):
value = Timedelta(value,unit='s')
return super(TimeDeltaBlock, self).fillna(value, **kwargs)
def _try_coerce_args(self, values, other):
"""
Coerce values and other to int64, with null values converted to
iNaT. values is always ndarray-like, other may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isnull(values)
values = values.view('i8')
other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, Timedelta):
other_mask = isnull(other)
other = other.value
elif isinstance(other, np.timedelta64):
other_mask = isnull(other)
other = other.view('i8')
elif isinstance(other, timedelta):
other = Timedelta(other).value
elif isinstance(other, np.ndarray):
other_mask = isnull(other)
other = other.astype('i8',copy=False).view('i8')
else:
# scalar
other = Timedelta(other)
other_mask = isnull(other)
other = other.value
return values, values_mask, other, other_mask
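# Illustrative note (not part of the original source): the coercion is simply
# a view onto int64 nanoseconds, e.g. (hypothetical value):
#
#     >>> Timedelta('1s').value
#     >>> # -> 1000000000
#
# NaT-like scalars become tslib.iNaT and are flagged through other_mask, so
# the arithmetic runs on plain i8 and is re-boxed by _try_coerce_result.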
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
#### FIXME ####
# should use the core.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
def get_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timedelta
).reshape(self.values.shape)
return self.values
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, mgr=None):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex, mgr=mgr)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None, **kwargs):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement, **kwargs)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
# TODO: Refactor when convert_objects is removed since there will be 1 path
def convert(self, *args, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
if args:
raise NotImplementedError
by_item = True if 'by_item' not in kwargs else kwargs['by_item']
new_inputs = ['coerce','datetime','numeric','timedelta']
new_style = False
for kw in new_inputs:
new_style |= kw in kwargs
if new_style:
fn = convert._soft_convert_objects
fn_inputs = new_inputs
else:
fn = convert._possibly_convert_objects
fn_inputs = ['convert_dates','convert_numeric','convert_timedeltas']
fn_inputs += ['copy']
fn_kwargs = {}
for key in fn_inputs:
if key in kwargs:
fn_kwargs[key] = kwargs[key]
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = fn(values.ravel(), **fn_kwargs).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values, ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = fn(self.values.ravel(), **fn_kwargs).reshape(self.values.shape)
blocks.append(make_block(values, ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
return _extend_blocks([ b.convert(datetime=True, numeric=False) for b in blocks ])
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or is_internal_type(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
result_blocks = []
blocks = [self]
if not either_list and com.is_re(to_replace):
return self._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=True, convert=convert, mgr=mgr)
elif not (either_list or regex):
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
elif to_rep_is_list and regex:
for to_rep in to_replace:
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
return self._replace_single(to_replace, value,
inplace=inplace, filter=filter,
convert=convert, regex=regex, mgr=mgr)
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re); otherwise to_replace is a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter,
regex=regex,
mgr=mgr)
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here; s can be either a string
# or null, and if it's null it gets returned unchanged
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
# convert
block = self.make_block(new_values)
if convert:
block = block.convert(by_item=True,numeric=False)
return block
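# Illustrative note (not part of the original source): the vectorised regex
# replacement behaves roughly like (hypothetical data):
#
#     >>> rx = re.compile(r'^ba')
#     >>> f = np.vectorize(lambda s: rx.sub('zz', s), otypes=[object])
#     >>> f(np.array(['bat', 'cat'], dtype=object))
#     >>> # -> array(['zzt', 'cat'], dtype=object)
#
# with the TypeError guard above letting non-string (e.g. null) entries pass
# through untouched.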
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_verify_integrity = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
return self.copy() if copy else self
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillna(self, value, limit=None, inplace=False, downcast=None, mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
values = self._try_coerce_result(values.fillna(value=value,
limit=limit))
return [self.make_block(values=values)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(values=values.fillna(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0, mgr=None):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
# but are passed the axis depending on the calling routine;
# if it's REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, mgr=None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise an exception if raise_on_error is True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return self.make_block(values)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1,len(values))
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _astype(self, dtype, mgr=None, **kwargs):
"""
these automatically copy, so copy=True has no effect
raise an exception if raise_on_error is True
"""
# if we are passed a datetime64[ns, tz]
if com.is_datetime64tz_dtype(dtype):
dtype = DatetimeTZDtype(dtype)
values = self.values
if getattr(values,'tz',None) is None:
values = DatetimeIndex(values).tz_localize('UTC')
values = values.tz_convert(dtype.tz)
return self.make_block(values)
# delegate
return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
isinstance(element, datetime) or
isnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
"""
Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isnull(values)
values = values.view('i8')
other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, (datetime, np.datetime64, date)):
other = lib.Timestamp(other)
if getattr(other, 'tz', None) is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a naive Block")
other_mask = isnull(other)
other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
try:
other = np.asarray(other)
other_mask = isnull(other)
other = other.astype('i8',copy=False).view('i8')
except ValueError:
# coercion issues
# let higher levels handle
raise TypeError
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[..., slicer]
from pandas.core.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(values.view('i8').ravel(),
tz=getattr(self.values,'tz',None),
format=format,
na_rep=na_rep).reshape(values.shape)
return np.atleast_2d(result)
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64) and not is_datetimetz(value)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def get_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock):
""" implement a datetime64 block with a tz attribute """
__slots__ = ()
_holder = DatetimeIndex
is_datetimetz = True
def __init__(self, values, placement, ndim=2,
**kwargs):
if not isinstance(values, self._holder):
values = self._holder(values)
if values.tz is None:
raise ValueError("cannot create a DatetimeTZBlock without a tz")
super(DatetimeTZBlock, self).__init__(values,
placement=placement,
ndim=ndim,
**kwargs)
def external_values(self):
""" we internally represent the data as a DatetimeIndex, but for external
compat with ndarray, export as a ndarray of Timestamps """
return self.values.astype('datetime64[ns]').values
def get_values(self, dtype=None):
# return object dtype as Timestamps with the zones
if dtype == object:
return lib.map_infer(self.values.ravel(), lambda x: lib.Timestamp(x,tz=self.values.tz))\
.reshape(self.values.shape)
return self.values
def to_object_block(self, mgr):
"""
return myself as an object block
Since we keep the DTI as a 1-d object, the result differs
depending on the BlockManager's ndim
"""
values = self.get_values(dtype=object)
kwargs = {}
if mgr.ndim > 1:
values = _block_shape(values,ndim=mgr.ndim)
kwargs['ndim'] = mgr.ndim
kwargs['placement']=[0]
return self.make_block(values, klass=ObjectBlock, **kwargs)
def replace(self, *args, **kwargs):
# if we are forced to ObjectBlock, then don't coerce (to UTC)
kwargs['convert'] = False
return super(DatetimeTZBlock, self).replace(*args, **kwargs)
def _slice(self, slicer):
""" return a slice of my values """
if isinstance(slicer, tuple):
col, loc = slicer
if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
return self.values[slicer]
def _try_coerce_args(self, values, other):
"""
localize and return i8 for the values
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isnull(values)
values = values.tz_localize(None).asi8
other_mask = False
if isinstance(other, ABCSeries):
other = self._holder(other)
other_mask = isnull(other)
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = other.tz_localize(None).asi8
other_mask = isnull(other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = lib.Timestamp(other)
tz = getattr(other, 'tz', None)
# test we can have an equal time zone
if tz is None or str(tz) != str(self.values.tz):
raise ValueError("incompatible or non tz-aware value")
other_mask = isnull(other)
other = other.tz_localize(None).value
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
result = super(DatetimeTZBlock, self)._try_coerce_result(result)
if isinstance(result, np.ndarray):
result = self._holder(result, tz=self.values.tz)
elif isinstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result, tz=self.values.tz)
return result
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
### think about moving this to the DatetimeIndex. This is a non-freq (number of periods) shift ###
N = len(self)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
# move to UTC & take
new_values = self.values.tz_localize(None).asi8.take(indexer)
if periods > 0:
new_values[:periods] = tslib.iNaT
else:
new_values[periods:] = tslib.iNaT
new_values = DatetimeIndex(new_values,tz=self.values.tz)
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
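# Illustrative note (not part of the original source): the indexer above is a
# positional shift, e.g. for N=4, periods=1:
#
#     >>> indexer = np.zeros(4, dtype=int)
#     >>> indexer[1:] = np.arange(3)            # [0, 0, 1, 2]
#     >>> # taking with this indexer and then writing iNaT into the first slot
#     >>> # reproduces shift(1) on the underlying i8 values.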
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_box_to_block_values = False
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
def to_dense(self):
return self.values.to_dense().view()
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True, mgr=None):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, copy=False, fastpath=True, **kwargs):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return self.make_block(np.empty(values.shape, dtype=dtype),
placement,
fastpath=True)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return self.make_block(new_values,
fastpath=fastpath,
placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = mis.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None, mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.get_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
if hasattr(values,'tz'):
klass = DatetimeTZBlock
else:
klass = DatetimeBlock
elif is_datetimetz(values):
klass = DatetimeTZBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
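# Illustrative note (not part of the original source): the factory picks the
# block class purely from the dtype / values type, e.g. (hypothetical arrays):
#
#     >>> # np.array([1.5])                     -> FloatBlock
#     >>> # np.array([1], dtype='int64')        -> IntBlock
#     >>> # np.array([1], dtype='m8[ns]')       -> TimeDeltaBlock
#     >>> # np.array(['2000'], dtype='M8[ns]')  -> DatetimeBlock
#     >>> # np.array(['a'], dtype=object)       -> ObjectBlock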
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, Panel, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' % (old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
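# Illustrative note (not part of the original source): _blknos/_blklocs map
# every manager column back to its block, e.g. with two blocks holding
# columns [0, 2] and [1]:
#
#     >>> # _blknos  -> array([0, 1, 0])   which block owns each column
#     >>> # _blklocs -> array([0, 0, 1])   the column's position inside it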
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return com.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return com.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values,
mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = com.pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % com.pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.format(len(self.items),
tot_items))
def apply(self, f, axes=None, filter=None, do_integrity_check=False, consolidate=True, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager integrity check
consolidate: boolean, default True. Join together blocks having same dtype
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if consolidate:
self._consolidate_inplace()
if f == 'where':
align_copy = True
if kwargs.get('align', True):
align_keys = ['other', 'cond']
else:
align_keys = ['cond']
elif f == 'putmask':
align_copy = False
if kwargs.get('align', True):
align_keys = ['new', 'mask']
else:
align_keys = ['mask']
elif f == 'eval':
align_copy = False
align_keys = ['other']
elif f == 'fillna':
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k]) for k in align_keys
if hasattr(kwargs[k], 'reindex_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
kwargs['mgr'] = self
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
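# Illustrative sketch (not part of the original source): most manager-level
# operations below are thin wrappers that dispatch a block method by name, e.g.
#
#     mgr.astype('float64')
#     # is equivalent to
#     mgr.apply('astype', dtype='float64')
#
# apply() consolidates first (unless consolidate=False), optionally restricts the
# call to blocks intersecting `filter`, aligns any 'other'/'cond'/'new'/'mask'
# arguments block-by-block, and rebuilds a new BlockManager from the results.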
def isnull(self, **kwargs):
return self.apply('apply', **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
def eval(self, **kwargs):
return self.apply('eval', **kwargs)
def setitem(self, **kwargs):
return self.apply('setitem', **kwargs)
def putmask(self, **kwargs):
return self.apply('putmask', **kwargs)
def diff(self, **kwargs):
return self.apply('diff', **kwargs)
def interpolate(self, **kwargs):
return self.apply('interpolate', **kwargs)
def shift(self, **kwargs):
return self.apply('shift', **kwargs)
def fillna(self, **kwargs):
return self.apply('fillna', **kwargs)
def downcast(self, **kwargs):
return self.apply('downcast', **kwargs)
def astype(self, dtype, **kwargs):
return self.apply('astype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply('convert', **kwargs)
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False, mgr=None):
""" do a list replace """
if mgr is None:
mgr = self
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
return _possibly_compare(values, getattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
# it's possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replace(s, d, inplace=inplace,
regex=regex, mgr=mgr)
new_rb = _extend_blocks(result, new_rb)
else:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
Return True if more than one block with the same dtype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
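# Illustrative sketch (not part of the original source): consolidation merges
# blocks that share an ftype, so inserting float columns one at a time leaves
# several FloatBlocks that _consolidate() later fuses into one:
#
#     before: [FloatBlock(['a']), FloatBlock(['b']), IntBlock(['c'])]
#     after:  [FloatBlock(['a', 'b']), IntBlock(['c'])]
#
# is_consolidated() is therefore True exactly when no two blocks share an ftype.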
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0,
allow_fill=False)
new_blocks.append(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True, mgr=None):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == 'all':
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [ copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply('copy', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].get_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workaround for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
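# Illustrative sketch (assumes a pandas DataFrame; not from the original source):
#
#     import pandas as pd
#     df = pd.DataFrame({'i': [1, 2], 'f': [1.5, 2.5]})
#     df.values.dtype     # -> float64: _interleave() allocates one array with the
#                         #    common dtype and copies each block into the rows
#                         #    given by its mgr_locs
#
# A single-block (or non-mixed) manager skips this and as_matrix() returns the
# block's values directly.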
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(values=vals, placement=block.mgr_locs,
klass=block.__class__, fastpath=True,)]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
get a cross-section for a given location in the
items; handles duplicates
return the result, which *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
return result
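# Illustrative sketch (not part of the original source): fast_xs assembles one
# row across blocks. For items ['a', 'b', 'c'] split over an int block and a
# float block it allocates a single result array of the interleaved dtype and
# fills result[rl] for every manager location rl of every block. With a single
# block it simply returns blocks[0].iget((slice(None), loc)), which may be a view.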
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isnull(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isnull(self.items)]
# allow a single nan location indexer
if not np.isscalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc, fastpath=fastpath)
else:
if isnull(item):
raise TypeError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0, allow_dups=True)
def iget(self, i, fastpath=True):
"""
Return the data as a SingleBlockManager if fastpath=True and possible
Otherwise return it as an ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
if not fastpath or not block._box_to_block_values or values.ndim != 1:
return values
# fastpath shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager([ block.make_block_same_class(values,
placement=slice(0, len(values)),
ndim=1,
fastpath=True) ],
self.axes[1])
def get_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.get_loc(x)
for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
full_loc[0] = self._blklocs[full_loc[0]]
# FIXME: this may return non-upcasted types?
return blk.values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds a new Block if the item
is not contained in the current set of items.
If check is True, validate that we are not setting the same data in-place.
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_internal_type = is_internal_type(value)
# categorical/sparse/datetimetz
if value_is_internal_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
len(removed_blknos))
self._blknos = com.take_1d(new_blknos, self._blknos, axis=0,
allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_internal_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.copy(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
len(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(values=value_getitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert %s, already exists' % item)
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
block = make_block(values=value,
ndim=self.ndim,
placement=slice(loc, loc+1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
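# Illustrative sketch (assumes a pandas DataFrame; not from the original source):
#
#     df = pd.DataFrame({'a': [1, 2]})
#     df['b'] = [3.0, 4.0]      # reaches BlockManager.set(), which falls back to
#                               # insert() because 'b' is a new item
#
# insert() appends a brand-new block and shifts the mgr_locs of every block at or
# beyond `loc` by one; it never merges dtypes eagerly, which is why the manager
# self-consolidates once it accumulates more than 100 blocks.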
def reindex_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, copy=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(
new_index, method=method, limit=limit)
return self.reindex_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, copy=copy)
def reindex_indexer(self, new_axis, indexer, axis, fill_value=None,
allow_dups=False, copy=True):
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object
allow_dups : bool
indexer is a pandas-style indexer and may contain -1 entries (missing).
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis,
fill_tuple=(fill_value if fill_value is not None else
blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.getitem_block(slobj,
new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = com._maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = com.take_1d(self._blknos, slobj, fill_value=-1,
allow_fill=allow_fill)
blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1,
allow_fill=allow_fill)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(self._make_na_block(
placement=mgr_locs, fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's only one item
# and each mgr loc is a copy of that single item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(blk.take_nd(
blklocs[mgr_locs.indexer], axis=0,
new_mgr_locs=mgr_locs, fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64') if isinstance(indexer, slice) \
else np.asanyarray(indexer, dtype='int64')
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
def merge(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concat_indexes([l, r])
new_blocks = [blk.copy(deep=False)
for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.copy(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.append(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all (ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(block.equals(oblock) for block, oblock in
zip(self_blocks, other_blocks))
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError(
"cannot create SingleBlockManager with more than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if isinstance(block, list):
# empty block
if len(block) == 0:
block = [np.array([])]
elif len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if isinstance(block, list):
# provide consolidation to the interleaved_dtype
if len(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.astype(dtype) for b in block]
block = _consolidate(block)
if len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not isinstance(block, Block):
block = make_block(block,
placement=slice(0, len(axis)),
ndim=1, fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, copy=True):
# if we are the same and don't copy, just return
if self.index.equals(new_axis):
if copy:
return self.copy(deep=True)
else:
return self
values = self._block.get_values()
if indexer is None:
indexer = self.items.get_indexer_for(new_axis)
if fill_value is None:
# FIXME: is fill_value used correctly in sparse blocks?
if not self._block.is_sparse:
fill_value = self._block.fill_value
else:
fill_value = np.nan
new_values = com.take_1d(values, indexer,
fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = mis.interpolate_2d(new_values, method=method,
limit=limit, fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, copy=copy,
placement=slice(0, len(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.apply('convert', **kwargs)
@property
def dtype(self):
return self._block.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_ftype_counts(self):
return {self.ftype: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
def get_ftypes(self):
return np.array([self._block.ftype])
def external_values(self):
return self._block.external_values()
def internal_values(self):
return self._block.internal_values()
def get_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(),copy=False)
@property
def itemsize(self):
return self._block.values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
implied = tuple(map(int, [len(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
passed,implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values; its placement is
# basically "all items". But if there are many, don't bother
# converting, it's an error anyway.
blocks = [make_block(values=blocks[0],
placement=slice(0, len(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
datetime_tz_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if is_sparse(v):
sparse_items.append((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.append((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if is_datetimetz(v):
datetime_tz_items.append((i, k, v))
else:
datetime_items.append((i, k, v))
elif is_datetimetz(v):
datetime_tz_items.append((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2 ** 63 - 1).any():
object_items.append((i, k, v))
continue
int_items.append((i, k, v))
elif v.dtype == np.bool_:
bool_items.append((i, k, v))
elif is_categorical(v):
cat_items.append((i, k, v))
else:
object_items.append((i, k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _multi_blockify(complex_items)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(
datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(datetime_tz_items):
dttz_blocks = [ make_block(array,
klass=DatetimeTZBlock,
fastpath=True,
placement=[i],
) for i, names, array in datetime_tz_items ]
blocks.extend(dttz_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(
bool_items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(
object_items, np.object_)
blocks.extend(object_blocks)
if len(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if len(cat_items) > 0:
cat_blocks = [ make_block(array,
klass=CategoricalBlock,
fastpath=True,
placement=[i]
) for i, names, array in cat_items ]
blocks.extend(cat_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(
list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(
array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return len(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks):
if not len(blocks):
return None
counts = defaultdict(list)
for x in blocks:
counts[type(x)].append(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = len(counts[IntBlock]) > 0
have_bool = len(counts[BoolBlock]) > 0
have_object = len(counts[ObjectBlock]) > 0
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_dt64_tz = len(counts[DatetimeTZBlock]) > 0
have_td64 = len(counts[TimeDeltaBlock]) > 0
have_cat = len(counts[CategoricalBlock]) > 0
have_sparse = len(counts[SparseBlock]) > 0
have_numeric = have_float or have_complex or have_int
has_non_numeric = have_dt64 or have_dt64_tz or have_td64 or have_cat
if (have_object or
(have_bool and (have_numeric or have_dt64 or have_dt64_tz or have_td64)) or
(have_numeric and has_non_numeric) or
have_cat or
have_dt64 or
have_dt64_tz or
have_td64):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
lcd = _lcd_dtype(counts[IntBlock])
kinds = set([i.dtype.kind for i in counts[IntBlock]])
if len(kinds) == 1:
return lcd
if lcd == 'uint64' or lcd == 'int64':
return np.dtype('int64')
# return the next larger int type (double the itemsize) if unsigned
if lcd.kind == 'u':
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
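# Illustrative sketch of the promotion rules above (not part of the original
# source), written as (block dtypes) -> interleaved dtype:
#
#     (int64, float64)        -> float64
#     (int32, int64)          -> int64       # single kind: lowest-common dtype
#     (uint64, int64)         -> int64       # mixed signedness collapses to int64
#     (bool, int64)           -> object
#     (float64, datetime64)   -> object
#     (bool,)                 -> bool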
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
new_blocks = _extend_blocks(merged_blocks, new_blocks)
return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values,
fastpath=True, placement=new_mgr_locs)
# no merge
return blocks
def _extend_blocks(result, blocks=None):
""" return a new extended blocks, givin the result """
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
if isinstance(r, list):
blocks.extend(r)
else:
blocks.append(r)
elif isinstance(result, BlockManager):
blocks.extend(result.blocks)
else:
blocks.append(result)
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _possibly_compare(a, b, op):
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
# numpy deprecation warning for i8 vs integer comparisons
if is_datetimelike_v_numeric(a, b):
result = False
# numpy deprecation warning if comparing numeric vs string-like
elif is_numeric_v_string_like(a, b):
result = False
else:
result = op(a, b)
if lib.isscalar(result) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return result
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
from pandas.core.internals import make_block
panel_shape = (len(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and minor
# labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
values = values
for i in range(len(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(
np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
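# Illustrative sketch (not part of the original source):
#
#     _factor_indexer((3,), [[0, 0, 1], [0, 2, 1]])   # -> array([0, 2, 4])
#
# i.e. with a trailing axis of length 3, the (major, minor) label pairs (0, 0),
# (0, 2) and (1, 1) map to the row-major flat offsets major*3 + minor.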
def _get_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = com._ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.get_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If the corresponding suffix is empty, the entry is simply converted to a string.
"""
to_rename = left.intersection(right)
if len(to_rename) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenamer),
_transform_index(right, rrenamer))
def _transform_index(index, func):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
"""
if isinstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
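# Illustrative sketch (not part of the original source): during a merge with
# overlapping column names,
#
#     items_overlap_with_suffix(Index(['key', 'val']), '_x',
#                               Index(['key', 'val']), '_y')
#
# would return (Index(['key_x', 'val_x']), Index(['key_y', 'val_y'])), applying
# the renamers through _transform_index so MultiIndex levels are handled
# entry-by-entry as well.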
def _putmask_smart(v, m, n):
"""
Return a new block, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
"""
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.array([n] * len(m))
elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndmin=1), len(m))
# see if we are only masking values that, if put into the array,
# will work in the current dtype
try:
nn = n[m]
nn_at = nn.astype(v.dtype)
comp = (nn == nn_at)
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
# change the dtype
dtype, _ = com._maybe_promote(n.dtype)
nv = v.astype(dtype)
try:
nv[m] = n[m]
except ValueError:
idx, = np.where(np.squeeze(m))
for mask_index, new_val in zip(idx, n[m]):
nv[mask_index] = new_val
return nv
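# Illustrative sketch (not part of the original source):
#
#     v = np.array([1, 2, 3])                 # int64 values
#     m = np.array([False, True, False])
#     _putmask_smart(v, m, np.array([9, 9, 9]))        # stays int64
#     _putmask_smart(v, m, np.array([1.5, 1.5, 1.5]))  # upcasts to float64
#
# The dtype is preserved only when the masked new values round-trip through the
# existing dtype unchanged; otherwise the values are promoted first.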
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
"""
concat_plan = combine_concat_plans([get_mgr_concatenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers],
concat_axis)
blocks = [make_block(concatenate_join_units(join_units, concat_axis,
copy=copy),
placement=placement)
for placement, join_units in concat_plan]
return BlockManager(blocks, axes)
def get_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
na
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
upcast_classes = defaultdict(list)
null_upcast_classes = defaultdict(list)
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if com.is_categorical_dtype(dtype):
upcast_cls = 'category'
elif com.is_datetimetz(dtype):
upcast_cls = 'datetimetz'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_null:
null_upcast_classes[upcast_cls].append(dtype)
else:
upcast_classes[upcast_cls].append(dtype)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
elif 'datetimetz' in upcast_classes:
dtype = upcast_classes['datetimetz']
return dtype[0], tslib.iNaT
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
raise AssertionError("invalid dtype determination in get_concat_dtype")
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy and concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = com._concat_compat(to_concat, axis=concat_axis)
return concat_values
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape, except for the item axis, which will be separate
# for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = com.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = com.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not needing to reindex its
# block: no ax0 reindexing took place and block placement was
# sequential before.
((ax0_indexer is None
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs are sequential (and
# length match is checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
Join units may be trimmed in place while combining.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
class JoinUnit(object):
def __init__(self, block, shape, indexers={}):
# Passing shape explicitly is required for cases when block is None.
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self):
return '%s(%r, %s)' % (self.__class__.__name__,
self.block, self.indexers)
@cache_readonly
def needs_filling(self):
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return com._get_dtype(com._maybe_promote(self.block.dtype,
self.block.fill_value)[0])
return self._dtype
@cache_readonly
def is_null(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
# Usually it's enough to check only a small fraction of values to see if
# a block is NOT null; chunking should help in such cases. The chunk size
# of 1000 was chosen rather arbitrarily.
values = self.block.values
if self.block.is_categorical:
values_flat = values.categories
else:
values_flat = values.ravel()
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isnull(values_flat[i: i + chunk_len]).all():
return False
return True
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_null and not getattr(self.block,'is_categorical',None):
missing_arr = np.empty(self.shape, dtype=empty_dtype)
if np.prod(self.shape):
# NumPy 1.6 workaround: this statement gets strange if all
# blocks are of same dtype and some of them are empty:
# empty one are considered "null" so they must be filled,
# but no dtype upcasting happens and the dtype may not
# allow NaNs.
#
# In general, no one should get hurt when one tries to put
# incorrect values into an empty array, but numpy 1.6 is
# strict about that.
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = com.take_nd(values, indexer, axis=ax,
fill_value=fill_value)
return values
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
if len(arr) == 0:
# Handle empty arr case separately: numpy 1.6 chokes on that.
return np.empty((0, 2), dtype=arr.dtype)
else:
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
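# Illustrative sketch (not part of the original source):
#
#     _fast_count_smallints(np.array([0, 2, 2, 0, 0]))
#     # -> array([[0, 3],
#     #           [2, 2]])
#
# Each row is (value, count), computed via np.bincount instead of a Python-level
# counter, which is what keeps the blkno bookkeeping above cheap.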
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer,
length)
elif (isinstance(slice_or_indexer, np.ndarray) and
slice_or_indexer.dtype == np.bool_):
return 'mask', slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return 'fancy', indexer, len(indexer)
| apache-2.0 |
tomlof/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 87 | 3903 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as the supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
wzbozon/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
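# Aside: for the 'digits' case above, get_data keeps the first N images and, after
# reordering columns by their value in the very first image (descending), the first D
# columns - a cheap, deterministic way of retaining the busier pixels when D < 64.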
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)  # use the fiducial D (DD above was the loop variable)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
benhamner/Stack-Overflow-Competition | competition_utilities.py | 2 | 3323 | from __future__ import division
from collections import Counter
import csv
import dateutil
import numpy as np
import os
import pandas as pd
data_path = None
submissions_path = None
if not data_path or not submissions_path:
raise Exception("Set the data and submission paths in competition_utilities.py!")
def parse_date_maybe_null(date):
if date:
return dateutil.parser.parse(date)
return None
df_converters = {"PostCreationDate": dateutil.parser.parse,
"OwnerCreationDate": dateutil.parser.parse}
# "PostClosedDate": parse_date_maybe_null}
def get_reader(file_name="train-sample.csv"):
reader = csv.reader(open(os.path.join(data_path, file_name)))
header = reader.next()
return reader
def get_header(file_name="train-sample.csv"):
reader = csv.reader(open(os.path.join(data_path, file_name)))
header = reader.next()
return header
def get_closed_count(file_name):
return sum(1 for q in iter_closed_questions(file_name))
def iter_closed_questions(file_name):
df_iter = pd.io.parsers.read_csv(os.path.join(data_path, file_name), iterator=True, chunksize=1000)
return (question[1] for df in df_iter for question in df[df["OpenStatus"] != "open"].iterrows())
def iter_open_questions(file_name):
df_iter = pd.io.parsers.read_csv(os.path.join(data_path, file_name), iterator=True, chunksize=1000)
return (question[1] for df in df_iter for question in df[df["OpenStatus"] == "open"].iterrows())
def get_dataframe(file_name="train-sample.csv"):
return pd.io.parsers.read_csv(os.path.join(data_path, file_name), converters = df_converters)
def get_priors(file_name):
closed_reasons = [r[14] for r in get_reader(file_name)]
closed_reason_counts = Counter(closed_reasons)
reasons = sorted(closed_reason_counts.keys())
total = len(closed_reasons)
priors = [closed_reason_counts[reason]/total for reason in reasons]
return priors
def write_sample(file_name, header, sample):
writer = csv.writer(open(os.path.join(data_path, file_name), "w"), lineterminator="\n")
writer.writerow(header)
writer.writerows(sample)
def update_prior(old_prior, old_posterior, new_prior):
evidence_ratio = (old_prior*(1-old_posterior)) / (old_posterior*(1-old_prior))
new_posterior = new_prior / (new_prior + (1-new_prior)*evidence_ratio)
return new_posterior
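# Worked example (sketch): a model trained where the positive class made up 50% of the
# data and which outputs 0.8 for some question maps to 0.5 once a deployment-time base
# rate of 20% is substituted:
#   evidence_ratio = (0.5 * 0.2) / (0.8 * 0.5) = 0.25
#   new_posterior  = 0.2 / (0.2 + 0.8 * 0.25) = 0.5
# i.e. update_prior(0.5, 0.8, 0.2) == 0.5.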
def cap_and_update_priors(old_priors, old_posteriors, new_priors, epsilon):
old_posteriors = cap_predictions(old_posteriors, epsilon)
old_priors = np.kron(np.ones((np.size(old_posteriors, 0), 1)), old_priors)
new_priors = np.kron(np.ones((np.size(old_posteriors, 0), 1)), new_priors)
evidence_ratio = (old_priors*(1-old_posteriors)) / (old_posteriors*(1-old_priors))
new_posteriors = new_priors / (new_priors + (1-new_priors)*evidence_ratio)
new_posteriors = cap_predictions(new_posteriors, epsilon)
return new_posteriors
def cap_predictions(probs, epsilon):
probs[probs>1-epsilon] = 1-epsilon
probs[probs<epsilon] = epsilon
row_sums = probs.sum(axis=1)
probs = probs / row_sums[:, np.newaxis]
return probs
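# Note: cap_predictions first clips every probability into [epsilon, 1 - epsilon], which
# bounds the log-loss contribution of any single over-confident mistake, and then
# re-normalises each row so the class probabilities still sum to one. For example, with
# epsilon = 0.01 a row [1.0, 0.0] becomes [0.99, 0.01].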
def write_submission(file_name, predictions):
writer = csv.writer(open(os.path.join(submissions_path, file_name), "w"), lineterminator="\n")
writer.writerows(predictions) | bsd-2-clause |
shenzebang/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
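# Aside: initialising the non-metric fit from the metric embedding (init=pos) presumably
# keeps the two solutions in a comparable configuration and gives the iterative stress
# minimisation a sensible starting layout instead of a random one.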
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
JensMunkHansen/sofus | fnm/test_fnm.py | 1 | 3003 | import addpaths
import numpy as np
import sys
import swig_fnm as fnm
import numpy as np
from fnm import rect
from timeit import default_timer as timer
import matplotlib.pyplot as plt
plt.ion()
def log_compress(pressure,dBrange=20):
logp = np.abs(pressure)
logp = logp / logp.max()
logp = 20*np.log10(logp)
logp[logp < -dBrange] = -dBrange
logp[0,0] = 0
logp[-1,-1] = -dBrange
return logp
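# Usage sketch: log_compress maps |pressure| to decibels relative to its maximum, clips
# everything below -dBrange, and pins two corner pixels to 0 and -dBrange so that imshow
# always spans the full dynamic range, e.g.
#   log_compress(np.random.rand(8, 8) + 1e-3, dBrange=40)
# should return an 8x8 array with values in [-40, 0].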
def create_time_string(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
ms = np.round(s * 1000) % 1000
    timeString = "%d:%02d:%02d,%03d" % (h, m, s, ms)  # ms is already in milliseconds
return timeString;
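# Usage sketch: with the formatting above, create_time_string(3723.5) should produce
# "1:02:03,500", i.e. hours:minutes:seconds,milliseconds.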
print('This script uses the Fast Nearfield Method to calculate the CW pressure field of');
print('a single element. The script outputs the pressure field.\n');
f0 = 1e6 # excitation frequency,Hz
soundspeed = 1500 # m/s
lamda = soundspeed / f0 # wavelength, m
#define a transducer structure/array
nelex = 1
neley = 1
kerf = 5.0e-4
width = 3e-3 # transducer width, m
height = 50e-3 # transducer height, m
d = nelex * (width+kerf)
xmin = -1.5 * d/2
xmax = 1.5 * d/2
ymin = 0
ymax = 0
zmin = 0.0
zmax = 2*d
# Fast if many z-coordinates
nx = 130
nz = 250
dx = (xmax - xmin) / max(nx-1.0,1.0)
dz = (zmax - zmin) / max(nz-1.0,1.0)
xs = (np.r_[0:nx] - (nx-1.0)/2.0) * dx
zs = (np.r_[0:nz]) * dz
factor = int(height / width) # 16
ndiv = 4
k = (2*np.pi)/lamda
xs,zs = np.meshgrid(xs,zs,indexing='ij')
ys = np.zeros(xs.shape)
rect = rect(hw=width/2.0,hh=height/2.0,nAbcissa=[ndiv,ndiv*factor])
if 0:
start = timer()
result2 = rect.H4(xs,ys,zs,k)
end = timer()
timeString = create_time_string(end-start)
print(timeString)
plt.figure()
result2 = np.real(np.abs(result2))
plt.imshow(log_compress(result2),aspect='auto',extent=np.round(1000*np.r_[0,2*d,-d/2,d/2]),interpolation='none')
plt.xlabel('Depth [mm]')
plt.ylabel('Width [mm]')
if 0:
start = timer()
#result2 = rect.HN(xs,ys,zs,k)
result2 = rect.H_accurate(xs,ys,zs,k)
end = timer()
timeString = create_time_string(end-start)
print(timeString)
plt.figure()
result2 = np.real(np.abs(result2))
result2 = result2.reshape((nx,nz))
plt.imshow(log_compress(result2),aspect='auto',extent=np.round(1000*np.r_[0,2*d,-d/2,d/2]),interpolation='none')
plt.xlabel('Depth [mm]')
plt.ylabel('Width [mm]')
pos = np.c_[xs.flatten(), ys.flatten(), zs.flatten()].astype(np.float32)
a = fnm.ApertureFloat(1,width,kerf,height)
a.f0 = f0
a.c = soundspeed
a.nDivH = ndiv
a.nDivW = ndiv*factor
a.apodization = np.ones(1,dtype=np.float32)
start = timer()
out = a.CalcCwFast(pos)[1]
#out = a.CalcCwFieldRef(pos)[1] # Wrong
#out = a.CalcCwField2(pos)[1] # Looks okay
#out = a.CalcCwField(pos)[1] # Looks okay
end = timer()
timeString = create_time_string(end-start)
print(timeString)
plt.figure()
result3 = np.real(np.abs(out)).reshape((nx,nz))
plt.imshow(log_compress(result3),aspect='auto',extent=np.round(1000*np.r_[0,2*d,-d/2,d/2]),interpolation='none')
plt.xlabel('Depth [mm]')
plt.ylabel('Width [mm]')
| gpl-3.0 |
petosegan/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only a small fraction (well under 1%) of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
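# Aside: because only a handful of the 2000 time points carry noise, the l1 ("cityblock")
# norm of the added noise is only a few times its l2 ("euclidean") norm, whereas for
# dense Gaussian noise of the same length the ratio would be a few tens (roughly
# sqrt(n_features)). That asymmetry is what the metric comparison below rests on.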
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
stscieisenhamer/glue | glue/dialogs/subset_facet/qt/tests/test_subset_facet.py | 4 | 1169 | from __future__ import absolute_import, division, print_function
from mock import patch
from matplotlib import cm
from glue.core import Data, DataCollection
from ..subset_facet import SubsetFacet
patched_facet = patch('glue.dialogs.subset_facet.qt.subset_facet.facet_subsets')
class TestSubsetFacet(object):
def setup_method(self, method):
d = Data(x=[1, 2, 3])
dc = DataCollection([d])
self.collect = dc
self.s = dc.new_subset_group()
def test_limits(self):
s = SubsetFacet(self.collect)
s.data = self.collect[0]
s.component = self.collect[0].id['x']
assert s.vmin == 1
assert s.vmax == 3
def test_get_set_cmap(self):
s = SubsetFacet(self.collect)
assert s.cmap is cm.cool
def test_apply(self):
with patched_facet as p:
s = SubsetFacet(self.collect)
s.data = self.collect[0]
s.component = self.collect[0].id['x']
s._apply()
p.assert_called_once_with(self.collect, s.component,
lo=1, hi=3,
steps=5, log=False)
| bsd-3-clause |
raj4/bigbang | bigbang/twopeople.py | 1 | 2189 | from bigbang.archive import Archive
import bigbang.parse as parse
import bigbang.graph as graph
import bigbang.mailman as mailman
import bigbang.process as process
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from pprint import pprint as pp
import pytz
def duration(exchanges, A, B):
AtoB = exchanges[exchanges['From_original'] == A]
AtoB = AtoB[AtoB['From_response'] == B]
BtoA = exchanges[exchanges['From_original'] == B]
BtoA = BtoA[BtoA['From_response']==A]
if len(AtoB) == 0:
return max(BtoA['Date']) - min(BtoA['Date'])
if len(BtoA) == 0:
return max(AtoB['Date']) - min(AtoB['Date'])
return max(max(AtoB['Date']), max(BtoA['Date'])) - min(min(AtoB['Date']), min(BtoA['Date']))
def num_replies(exchanges, A, B):
AtoB = exchanges[exchanges['From_original'] == A]
AtoB = AtoB[AtoB['From_response'] == B]
BtoA = exchanges[exchanges['From_original'] == B]
BtoA = BtoA[BtoA['From_response'] == A]
return (len(AtoB), len(BtoA))
def reciprocity(exchanges, A, B):
num = num_replies(exchanges, A, B)
return float(min(num)) / max(num)
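# Worked example (sketch): if A replied to B six times and B replied to A twice,
# num_replies(...) is (6, 2) and reciprocity(...) is 2/6 ~= 0.33; the measure lies in
# (0, 1], with 1 meaning a perfectly balanced exchange.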
def unique_pairs(exchanges):
pairs = set()
total_responses = len(exchanges['From_original'])
for i in range(total_responses):
pair = (exchanges['From_original'][i], exchanges['From_response'][i])
pair_reversed = (exchanges['From_response'][i], exchanges['From_original'][i])
if pair_reversed not in pairs:
pairs.add(pair)
return pairs
def panda_pair(exchanges, A, B):
try:
return pd.DataFrame([{'A': A, 'B': B, 'duration':duration(exchanges, A, B), 'num_replies': sum(num_replies(exchanges, A, B)), 'reciprocity':reciprocity(exchanges, A, B)}])
except:
print 'No exchange between "%s" and "%s" exists.' % (A, B)
def panda_allpairs(exchanges, pairs):
data_list = []
for pair in pairs:
A = pair[0]
B = pair[1]
data_list.append({'A': A, 'B': B, 'duration':duration(exchanges, A, B), 'num_replies': sum(num_replies(exchanges, A, B)), 'reciprocity':reciprocity(exchanges, A, B)})
return pd.DataFrame(data_list)
| gpl-2.0 |
evanbiederstedt/RRBSfun | epiphen/total_chr12.py | 2 | 32998 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
print(len(cw154))
print(len(trito))
totalfiles = normalB + mcell + pcell + cd19cell + cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr12"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG",
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
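# Note: each entry of tott is "<sample name> <string of 0/1/? per chr12 position>", so
# the .phy file written below is a headerless, PHYLIP-like character matrix (presumably
# consumed by a downstream phylogeny tool), with "?" marking positions not covered in a
# given cell.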
tott.to_csv("total_chrom12.phy", header=None, index=None)
print(tott.shape)
| mit |
sarahgrogan/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
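    # Note: ``hides`` wraps a method in a property that raises AttributeError whenever
    # the instance was constructed with hidden_method set to that method's name, so
    # hasattr() reports False for it; otherwise it returns the bound method via
    # functools.partial. This lets the test toggle which methods the sub-estimator
    # appears to expose.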
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/core/reshape/concat.py | 1 | 21238 | """
concat routines
"""
import numpy as np
from pandas import compat, DataFrame, Series, Index, MultiIndex
from pandas.core.index import (_get_objs_combined_axis,
_ensure_index, _get_consensus_names,
_all_indexes_same)
from pandas.core.categorical import (_factorize_from_iterable,
_factorize_from_iterables)
from pandas.core.internals import concatenate_block_managers
from pandas.core import common as com
from pandas.core.generic import NDFrame
import pandas.core.dtypes.concat as _concat
# ---------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic
ignore_index : boolean, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys
names : list, default None
Names for the levels in the resulting hierarchical index
verify_integrity : boolean, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation
copy : boolean, default True
If False, do not copy data unnecessarily
Returns
-------
concatenated : object, type of objs
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
panda objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
See Also
--------
Series.append
DataFrame.append
DataFrame.join
DataFrame.merge
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2',])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3])
animal letter number
0 NaN a 1
1 NaN b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a']
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy)
return op.get_result()
class _Concatenator(object):
"""
Orchestrates a concatenation operation for BlockManagers
"""
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
ignore_index=False, verify_integrity=False, copy=True):
if isinstance(objs, (NDFrame, compat.string_types)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
'"{0}"'.format(type(objs).__name__))
if join == 'outer':
self.intersect = False
elif join == 'inner':
self.intersect = True
else: # pragma: no cover
raise ValueError('Only can inner (intersect) or outer (union) '
'join the other axis')
if isinstance(objs, dict):
if keys is None:
keys = sorted(objs)
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError('No objects to concatenate')
if keys is None:
objs = [obj for obj in objs if obj is not None]
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
name = getattr(keys, 'name', None)
keys = Index(clean_keys, name=name)
if len(objs) == 0:
raise ValueError('All objects passed were None')
# consolidate data & figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not isinstance(obj, NDFrame):
raise TypeError("cannot concatenate a non-NDFrame object")
# consolidate
obj._consolidate(inplace=True)
ndims.add(obj.ndim)
# get the sample
        # want the highest ndim that we have, and it must be non-empty
# unless all objs are empty
sample = None
if len(ndims) > 1:
max_ndim = max(ndims)
for obj in objs:
if obj.ndim == max_ndim and np.sum(obj.shape):
sample = obj
break
else:
            # filter out the empties if we have no multi-index possibilities
            # note: keep empty Series, as they affect the result columns / name
non_empties = [obj for obj in objs
if sum(obj.shape) > 0 or isinstance(obj, Series)]
if (len(non_empties) and (keys is None and names is None and
levels is None and
join_axes is None and
not self.intersect)):
objs = non_empties
sample = objs[0]
if sample is None:
sample = objs[0]
self.objs = objs
# Standardize axis parameter to int
if isinstance(sample, Series):
axis = DataFrame()._get_axis_number(axis)
else:
axis = sample._get_axis_number(axis)
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, Series)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {0}, "
"input was {1}".format(sample.ndim, axis))
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if len(ndims) > 1:
current_column = 0
max_ndim = sample.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == max_ndim:
pass
elif ndim != max_ndim - 1:
raise ValueError("cannot concatenate unaligned mixed "
"dimensional NDFrame objects")
else:
name = getattr(obj, 'name', None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
obj = sample._constructor({name: obj})
self.objs.append(obj)
# note: this is the BlockManager axis (since DataFrame is transposed)
self.axis = axis
self.join_axes = join_axes
self.keys = keys
self.names = names or getattr(keys, 'names', None)
self.levels = levels
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.copy = copy
self.new_axes = self._get_new_axes()
def get_result(self):
# series only
if self._is_series:
# stack blocks
if self.axis == 0:
# concat Series with length to keep dtype as much
non_empties = [x for x in self.objs if len(x) > 0]
if len(non_empties) > 0:
values = [x._values for x in non_empties]
else:
values = [x._values for x in self.objs]
new_data = _concat._concat_compat(values)
name = com._consensus_name_attr(self.objs)
cons = _concat._get_series_result_type(new_data)
return (cons(new_data, index=self.new_axes[0],
name=name, dtype=new_data.dtype)
.__finalize__(self, method='concat'))
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
cons = _concat._get_series_result_type(data)
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method='concat')
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexing on concat axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis,
copy=self.copy)
if not self.copy:
new_data._consolidate_inplace()
cons = _concat._get_frame_result_type(new_data, self.objs)
return (cons._from_axes(new_data, self.new_axes)
.__finalize__(self, method='concat'))
def _get_result_dim(self):
if self._is_series and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _get_new_axes(self):
ndim = self._get_result_dim()
new_axes = [None] * ndim
if self.join_axes is None:
for i in range(ndim):
if i == self.axis:
continue
new_axes[i] = self._get_comb_axis(i)
else:
if len(self.join_axes) != ndim - 1:
                raise AssertionError("length of join_axes must be "
                                     "equal to {0}".format(ndim - 1))
# ufff...
indices = compat.lrange(ndim)
indices.remove(self.axis)
for i, ax in zip(indices, self.join_axes):
new_axes[i] = ax
new_axes[self.axis] = self._get_concat_axis()
return new_axes
def _get_comb_axis(self, i):
data_axis = self.objs[0]._get_block_manager_axis(i)
try:
return _get_objs_combined_axis(self.objs, axis=data_axis,
intersect=self.intersect)
except IndexError:
types = [type(x).__name__ for x in self.objs]
raise TypeError("Cannot concatenate list of %s" % types)
def _get_concat_axis(self):
"""
Return index to be used along concatenation axis.
"""
if self._is_series:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = com._default_index(len(self.objs))
return idx
elif self.keys is None:
names = [None] * len(self.objs)
num = 0
has_names = False
for i, x in enumerate(self.objs):
if not isinstance(x, Series):
raise TypeError("Cannot concatenate type 'Series' "
"with object of type "
"%r" % type(x).__name__)
if x.name is not None:
names[i] = x.name
has_names = True
else:
names[i] = num
num += 1
if has_names:
return Index(names)
else:
return com._default_index(len(self.objs))
else:
return _ensure_index(self.keys)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = com._default_index(sum(len(i) for i in indexes))
return idx
if self.keys is None:
concat_axis = _concat_indexes(indexes)
else:
concat_axis = _make_concat_multiindex(indexes, self.keys,
self.levels, self.names)
self._maybe_check_integrity(concat_axis)
return concat_axis
def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index.get_duplicates()
raise ValueError('Indexes have overlapping values: %s'
% str(overlap))
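# A minimal, hypothetical sketch of the Series behaviour implemented by
# get_result() above, driven through the public pandas.concat API: axis=0
# stacks values into one Series, axis=1 turns each input into a column.
# The helper name and the sample data are invented for illustration only.
def _example_concat_series():
    import pandas as pd
    s1 = pd.Series([1, 2], name='a')
    s2 = pd.Series([3, 4], name='b')
    stacked = pd.concat([s1, s2], axis=0)   # length-4 Series
    wide = pd.concat([s1, s2], axis=1)      # DataFrame with columns 'a', 'b'
    return stacked, wide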
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
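# A small, hypothetical sketch of the verify_integrity check performed by
# _maybe_check_integrity() above: overlapping index values on the concat
# axis raise a ValueError when verify_integrity=True.
def _example_verify_integrity():
    import pandas as pd
    a = pd.DataFrame({'x': [1]}, index=[0])
    b = pd.DataFrame({'x': [2]}, index=[0])
    try:
        pd.concat([a, b], verify_integrity=True)
    except ValueError as err:
        return str(err)   # "Indexes have overlapping values: ..."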
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if ((levels is None and isinstance(keys[0], tuple)) or
(levels is not None and len(levels) > 1)):
zipped = compat.lzip(*keys)
if names is None:
names = [None] * len(zipped)
if levels is None:
_, levels = _factorize_from_iterables(zipped)
else:
levels = [_ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [_ensure_index(keys)]
else:
levels = [_ensure_index(x) for x in levels]
if not _all_indexes_same(indexes):
label_list = []
# things are potentially different sizes, so compute the exact labels
# for each level and pass those to MultiIndex.from_arrays
for hlevel, level in zip(zipped, levels):
to_concat = []
for key, index in zip(hlevel, indexes):
try:
i = level.get_loc(key)
except KeyError:
raise ValueError('Key %s not in level %s'
% (str(key), str(level)))
to_concat.append(np.repeat(i, len(index)))
label_list.append(np.concatenate(to_concat))
concat_index = _concat_indexes(indexes)
# these go at the end
if isinstance(concat_index, MultiIndex):
levels.extend(concat_index.levels)
label_list.extend(concat_index.labels)
else:
codes, categories = _factorize_from_iterable(concat_index)
levels.append(categories)
label_list.append(codes)
if len(names) == len(levels):
names = list(names)
else:
# make sure that all of the passed indices have the same nlevels
if not len(set([idx.nlevels for idx in indexes])) == 1:
raise AssertionError("Cannot concat indices that do"
" not have the same number of levels")
# also copies
names = names + _get_consensus_names(indexes)
return MultiIndex(levels=levels, labels=label_list, names=names,
verify_integrity=False)
new_index = indexes[0]
n = len(new_index)
kpieces = len(indexes)
# also copies
new_names = list(names)
new_levels = list(levels)
# construct labels
new_labels = []
# do something a bit more speedy
for hlevel, level in zip(zipped, levels):
hlevel = _ensure_index(hlevel)
mapped = level.get_indexer(hlevel)
mask = mapped == -1
if mask.any():
raise ValueError('Values not found in passed level: %s'
% str(hlevel[mask]))
new_labels.append(np.repeat(mapped, n))
if isinstance(new_index, MultiIndex):
new_levels.extend(new_index.levels)
new_labels.extend([np.tile(lab, kpieces) for lab in new_index.labels])
else:
new_levels.append(new_index)
new_labels.append(np.tile(np.arange(n), kpieces))
if len(new_names) < len(new_levels):
new_names.extend(new_index.names)
return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
verify_integrity=False)
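# A minimal, hypothetical sketch of the keys machinery handled by
# _make_concat_multiindex() above: passing keys to pandas.concat prepends an
# outer index level, yielding a two-level MultiIndex on the result.
def _example_concat_keys():
    import pandas as pd
    a = pd.DataFrame({'x': [1, 2]})
    b = pd.DataFrame({'x': [3, 4]})
    out = pd.concat([a, b], keys=['first', 'second'])
    # out.index: ('first', 0), ('first', 1), ('second', 0), ('second', 1)
    return out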
| gpl-2.0 |
azjps/bokeh | bokeh/core/compat/mplexporter/utils.py | 7 | 11497 | """
Utility Routines for Working with Matplotlib Objects
====================================================
"""
import itertools
import io
import base64
import numpy as np
import warnings
import matplotlib
from matplotlib.colors import colorConverter
from matplotlib.path import Path
from matplotlib.markers import MarkerStyle
from matplotlib.transforms import Affine2D
from matplotlib import ticker
# NOTE: bokeh mod
from bokeh.util.dependencies import import_optional
pd = import_optional('pandas')
def color_to_hex(color):
"""Convert matplotlib color code to hex color code"""
if color is None or colorConverter.to_rgba(color)[3] == 0:
return 'none'
else:
rgb = colorConverter.to_rgb(color)
return '#{0:02X}{1:02X}{2:02X}'.format(*(int(255 * c) for c in rgb))
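# For example, color_to_hex('red') returns '#FF0000', while any fully
# transparent color (alpha == 0) collapses to the string 'none'.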
def _many_to_one(input_dict):
"""Convert a many-to-one mapping to a one-to-one mapping"""
return dict((key, val)
for keys, val in input_dict.items()
for key in keys)
LINESTYLES = _many_to_one({('solid', '-', (None, None)): 'none',
('dashed', '--'): "6,6",
('dotted', ':'): "2,2",
('dashdot', '-.'): "4,4,2,4",
('', ' ', 'None', 'none'): None})
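# A small sketch of what _many_to_one() produces and how LINESTYLES is meant
# to be queried; the toy mapping below is hypothetical.
def _example_many_to_one():
    flat = _many_to_one({('a', 'b'): 1, ('c',): 2})
    assert flat == {'a': 1, 'b': 1, 'c': 2}
    return LINESTYLES.get('--')   # -> "6,6"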
def get_dasharray(obj):
"""Get an SVG dash array for the given matplotlib linestyle
Parameters
----------
obj : matplotlib object
The matplotlib line or path object, which must have a get_linestyle()
method which returns a valid matplotlib line code
Returns
-------
dasharray : string
The HTML/SVG dasharray code associated with the object.
"""
if obj.__dict__.get('_dashSeq') is not None:
return ','.join(map(str, obj._dashSeq))
else:
ls = obj.get_linestyle()
dasharray = LINESTYLES.get(ls, 'not found')
if dasharray == 'not found':
warnings.warn("line style '{0}' not understood: "
"defaulting to solid line.".format(ls))
dasharray = LINESTYLES['solid']
return dasharray
PATH_DICT = {Path.LINETO: 'L',
Path.MOVETO: 'M',
Path.CURVE3: 'S',
Path.CURVE4: 'C',
Path.CLOSEPOLY: 'Z'}
def SVG_path(path, transform=None, simplify=False):
"""Construct the vertices and SVG codes for the path
Parameters
----------
path : matplotlib.Path object
transform : matplotlib transform (optional)
if specified, the path will be transformed before computing the output.
Returns
-------
vertices : array
The shape (M, 2) array of vertices of the Path. Note that some Path
codes require multiple vertices, so the length of these vertices may
be longer than the list of path codes.
path_codes : list
A length N list of single-character path codes, N <= M. Each code is
a single character, in ['L','M','S','C','Z']. See the standard SVG
path specification for a description of these.
"""
if transform is not None:
path = path.transformed(transform)
vc_tuples = [(vertices if path_code != Path.CLOSEPOLY else [],
PATH_DICT[path_code])
for (vertices, path_code)
in path.iter_segments(simplify=simplify)]
if not vc_tuples:
# empty path is a special case
return np.zeros((0, 2)), []
else:
vertices, codes = zip(*vc_tuples)
vertices = np.array(list(itertools.chain(*vertices))).reshape(-1, 2)
return vertices, list(codes)
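# A minimal sketch of SVG_path() on a hand-built two-segment path; the
# vertices below are arbitrary.
def _example_svg_path():
    path = Path([(0., 0.), (1., 0.), (1., 1.)],
                [Path.MOVETO, Path.LINETO, Path.LINETO])
    vertices, codes = SVG_path(path)
    # vertices has shape (3, 2); codes == ['M', 'L', 'L']
    return vertices, codes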
def get_path_style(path, fill=True):
"""Get the style dictionary for matplotlib path objects"""
style = {}
style['alpha'] = path.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['edgecolor'] = color_to_hex(path.get_edgecolor())
if fill:
style['facecolor'] = color_to_hex(path.get_facecolor())
else:
style['facecolor'] = 'none'
style['edgewidth'] = path.get_linewidth()
style['dasharray'] = get_dasharray(path)
style['zorder'] = path.get_zorder()
return style
def get_line_style(line):
"""Get the style dictionary for matplotlib line objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['color'] = color_to_hex(line.get_color())
style['linewidth'] = line.get_linewidth()
style['dasharray'] = get_dasharray(line)
style['zorder'] = line.get_zorder()
return style
def get_marker_style(line):
"""Get the style dictionary for matplotlib marker objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['facecolor'] = color_to_hex(line.get_markerfacecolor())
style['edgecolor'] = color_to_hex(line.get_markeredgecolor())
style['edgewidth'] = line.get_markeredgewidth()
style['marker'] = line.get_marker()
markerstyle = MarkerStyle(line.get_marker())
markersize = line.get_markersize()
markertransform = (markerstyle.get_transform()
+ Affine2D().scale(markersize, -markersize))
style['markerpath'] = SVG_path(markerstyle.get_path(),
markertransform)
style['markersize'] = markersize
style['zorder'] = line.get_zorder()
return style
def get_text_style(text):
"""Return the text style dict for a text instance"""
style = {}
style['alpha'] = text.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['fontsize'] = text.get_size()
style['color'] = color_to_hex(text.get_color())
style['halign'] = text.get_horizontalalignment() # left, center, right
style['valign'] = text.get_verticalalignment() # baseline, center, top
style['malign'] = text._multialignment # text alignment when '\n' in text
style['rotation'] = text.get_rotation()
style['zorder'] = text.get_zorder()
return style
def get_axis_properties(axis):
"""Return the property dictionary for a matplotlib.Axis instance"""
props = {}
label1On = axis._major_tick_kw.get('label1On', True)
if isinstance(axis, matplotlib.axis.XAxis):
if label1On:
props['position'] = "bottom"
else:
props['position'] = "top"
elif isinstance(axis, matplotlib.axis.YAxis):
if label1On:
props['position'] = "left"
else:
props['position'] = "right"
else:
raise ValueError("{0} should be an Axis instance".format(axis))
# Use tick values if appropriate
locator = axis.get_major_locator()
props['nticks'] = len(locator())
if isinstance(locator, ticker.FixedLocator):
props['tickvalues'] = list(locator())
else:
props['tickvalues'] = None
# Find tick formats
formatter = axis.get_major_formatter()
if isinstance(formatter, ticker.NullFormatter):
props['tickformat'] = ""
elif isinstance(formatter, ticker.FixedFormatter):
props['tickformat'] = list(formatter.seq)
elif not any(label.get_visible() for label in axis.get_ticklabels()):
props['tickformat'] = ""
else:
props['tickformat'] = None
# Get axis scale
props['scale'] = axis.get_scale()
# Get major tick label size (assumes that's all we really care about!)
labels = axis.get_ticklabels()
if labels:
props['fontsize'] = labels[0].get_fontsize()
else:
props['fontsize'] = None
# Get associated grid
props['grid'] = get_grid_style(axis)
# get axis visibility
props['visible'] = axis.get_visible()
return props
def get_grid_style(axis):
gridlines = axis.get_gridlines()
if axis._gridOnMajor and len(gridlines) > 0:
color = color_to_hex(gridlines[0].get_color())
alpha = gridlines[0].get_alpha()
dasharray = get_dasharray(gridlines[0])
return dict(gridOn=True,
color=color,
dasharray=dasharray,
alpha=alpha)
else:
return {"gridOn": False}
def get_figure_properties(fig):
return {'figwidth': fig.get_figwidth(),
'figheight': fig.get_figheight(),
'dpi': fig.dpi}
def get_axes_properties(ax):
props = {'axesbg': color_to_hex(ax.patch.get_facecolor()),
'axesbgalpha': ax.patch.get_alpha(),
'bounds': ax.get_position().bounds,
'dynamic': ax.get_navigate(),
'axison': ax.axison,
'frame_on': ax.get_frame_on(),
'patch_visible':ax.patch.get_visible(),
'axes': [get_axis_properties(ax.xaxis),
get_axis_properties(ax.yaxis)]}
for axname in ['x', 'y']:
axis = getattr(ax, axname + 'axis')
domain = getattr(ax, 'get_{0}lim'.format(axname))()
lim = domain
if isinstance(axis.converter, matplotlib.dates.DateConverter):
scale = 'date'
if pd and isinstance(axis.converter, pd.tseries.converter.PeriodConverter):
_dates = [pd.Period(ordinal=int(d), freq=axis.freq)
for d in domain]
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second, 0)
for d in _dates]
else:
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second,
d.microsecond * 1E-3)
for d in matplotlib.dates.num2date(domain)]
else:
scale = axis.get_scale()
if scale not in ['date', 'linear', 'log']:
raise ValueError("Unknown axis scale: "
"{0}".format(axis.get_scale()))
props[axname + 'scale'] = scale
props[axname + 'lim'] = lim
props[axname + 'domain'] = domain
return props
def iter_all_children(obj, skipContainers=False):
"""
Returns an iterator over all children and nested children using
obj's get_children() method
if skipContainers is true, only childless objects are returned.
"""
if hasattr(obj, 'get_children') and len(obj.get_children()) > 0:
for child in obj.get_children():
if not skipContainers:
yield child
# could use `yield from` in python 3...
for grandchild in iter_all_children(child, skipContainers):
yield grandchild
else:
yield obj
def get_legend_properties(ax, legend):
handles, labels = ax.get_legend_handles_labels()
visible = legend.get_visible()
return {'handles': handles, 'labels': labels, 'visible': visible}
def image_to_base64(image):
"""
Convert a matplotlib image to a base64 png representation
Parameters
----------
image : matplotlib image object
The image to be converted.
Returns
-------
image_base64 : string
The UTF8-encoded base64 string representation of the png image.
"""
ax = image.axes
binary_buffer = io.BytesIO()
# image is saved in axes coordinates: we need to temporarily
# set the correct limits to get the correct image
lim = ax.axis()
ax.axis(image.get_extent())
image.write_png(binary_buffer)
ax.axis(lim)
binary_buffer.seek(0)
return base64.b64encode(binary_buffer.read()).decode('utf-8')
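# A minimal, hypothetical sketch of image_to_base64() on a synthetic image.
# It assumes a matplotlib version where AxesImage.write_png is available
# (the same assumption the function above already makes) and a
# non-interactive backend.
def _example_image_to_base64():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    im = ax.imshow(np.random.rand(4, 4))
    encoded = image_to_base64(im)    # base64-encoded PNG as a unicode string
    plt.close(fig)
    return encoded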
| bsd-3-clause |
h2educ/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
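# A minimal, hypothetical sketch of the public c_step() wrapper above on toy
# Gaussian data; the sample size and n_support are arbitrary.
def _example_c_step():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 2)
    location, covariance, det, support, dist = c_step(
        X, n_support=60, random_state=rng)
    # `support` flags the 60 observations whose scatter matrix reached the
    # smallest determinant found by the iterations
    return location, covariance, support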
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
        raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
                        "integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        `[n_samples + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates and random subsets before
pooling them into a larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): shrink the candidate pool before
            # allocating, so the retry actually fits in memory.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
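# A minimal, hypothetical sketch of fast_mcd() on contaminated 2-D data; the
# sample size and planted-outlier fraction are arbitrary.
def _example_fast_mcd():
    rng = np.random.RandomState(42)
    X = rng.randn(200, 2)
    X[:20] += 6.    # plant a cluster of outliers
    location, covariance, support, dist = fast_mcd(X, random_state=rng)
    # `support` marks the observations used for the raw robust estimates;
    # most of the planted outliers should fall outside it.
    return location, covariance, support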
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is close to, but not
        exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        [n_samples + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
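# A minimal, hypothetical sketch of the full MinCovDet estimator defined
# above, fit on similarly contaminated data.
def _example_min_cov_det():
    rng = np.random.RandomState(0)
    X = rng.randn(300, 3)
    X[:30] += 5.    # outliers
    mcd = MinCovDet(random_state=0).fit(X)
    # robust location / covariance after correction and re-weighting
    return mcd.location_, mcd.covariance_, mcd.support_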
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/plotting/_converter.py | 7 | 35069 | from datetime import datetime, timedelta
import datetime as pydt
import numpy as np
from dateutil.relativedelta import relativedelta
import matplotlib.units as units
import matplotlib.dates as dates
from matplotlib.ticker import Formatter, AutoLocator, Locator
from matplotlib.transforms import nonsingular
from pandas.core.dtypes.common import (
is_float, is_integer,
is_integer_dtype,
is_float_dtype,
is_datetime64_ns_dtype,
is_period_arraylike,
is_nested_list_like
)
from pandas.compat import lrange
import pandas.compat as compat
import pandas._libs.lib as lib
import pandas.core.common as com
from pandas.core.index import Index
from pandas.core.series import Series
from pandas.core.indexes.datetimes import date_range
import pandas.core.tools.datetimes as tools
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import FreqGroup
from pandas.core.indexes.period import Period, PeriodIndex
from pandas.plotting._compat import _mpl_le_2_0_0
# constants
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
MUSEC_PER_DAY = 1e6 * SEC_PER_DAY
def register():
units.registry[lib.Timestamp] = DatetimeConverter()
units.registry[Period] = PeriodConverter()
units.registry[pydt.datetime] = DatetimeConverter()
units.registry[pydt.date] = DatetimeConverter()
units.registry[pydt.time] = TimeConverter()
units.registry[np.datetime64] = DatetimeConverter()
def _to_ordinalf(tm):
tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second +
float(tm.microsecond / 1e6))
return tot_sec
def time2num(d):
if isinstance(d, compat.string_types):
parsed = tools.to_datetime(d)
if not isinstance(parsed, datetime):
raise ValueError('Could not parse time %s' % d)
return _to_ordinalf(parsed.time())
if isinstance(d, pydt.time):
return _to_ordinalf(d)
return d
class TimeConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
valid_types = (str, pydt.time)
if (isinstance(value, valid_types) or is_integer(value) or
is_float(value)):
return time2num(value)
if isinstance(value, Index):
return value.map(time2num)
if isinstance(value, (list, tuple, np.ndarray, Index)):
return [time2num(x) for x in value]
return value
@staticmethod
def axisinfo(unit, axis):
if unit != 'time':
return None
majloc = AutoLocator()
majfmt = TimeFormatter(majloc)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='time')
@staticmethod
def default_units(x, axis):
return 'time'
# time formatter
class TimeFormatter(Formatter):
def __init__(self, locs):
self.locs = locs
    def __call__(self, x, pos=0):
        fmt = '%H:%M:%S'
        s = int(x)
        # full microsecond remainder (0..999999) expected by datetime.time
        us = int((x - s) * 1e6)
        ms = us // 1000
        m, s = divmod(s, 60)
        h, m = divmod(m, 60)
        _, h = divmod(h, 24)
        if us % 1000 != 0:
            # sub-millisecond precision: %f renders all six fractional digits
            return pydt.time(h, m, s, us).strftime(fmt + '.%f')
        elif ms != 0:
            # millisecond precision: drop the trailing microsecond digits
            return pydt.time(h, m, s, us).strftime(fmt + '.%f')[:-3]
        return pydt.time(h, m, s).strftime(fmt)
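# A small, hypothetical sketch of the time2num()/TimeConverter plumbing
# above: a datetime.time maps to seconds since midnight, the float that
# TimeFormatter later renders back into a label.
def _example_time2num():
    t = pydt.time(13, 30, 15)
    seconds = time2num(t)    # 13*3600 + 30*60 + 15 == 48615.0
    assert seconds == 48615.0
    return TimeConverter.convert(t, 'time', axis=None)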
# Period Conversion
class PeriodConverter(dates.DateConverter):
@staticmethod
def convert(values, units, axis):
if is_nested_list_like(values):
values = [PeriodConverter._convert_1d(v, units, axis)
for v in values]
else:
values = PeriodConverter._convert_1d(values, units, axis)
return values
@staticmethod
def _convert_1d(values, units, axis):
if not hasattr(axis, 'freq'):
raise TypeError('Axis must have `freq` set to convert to Periods')
valid_types = (compat.string_types, datetime,
Period, pydt.date, pydt.time)
if (isinstance(values, valid_types) or is_integer(values) or
is_float(values)):
return get_datevalue(values, axis.freq)
if isinstance(values, PeriodIndex):
return values.asfreq(axis.freq)._values
if isinstance(values, Index):
return values.map(lambda x: get_datevalue(x, axis.freq))
if is_period_arraylike(values):
return PeriodIndex(values, freq=axis.freq)._values
if isinstance(values, (list, tuple, np.ndarray, Index)):
return [get_datevalue(x, axis.freq) for x in values]
return values
def get_datevalue(date, freq):
if isinstance(date, Period):
return date.asfreq(freq).ordinal
elif isinstance(date, (compat.string_types, datetime,
pydt.date, pydt.time)):
return Period(date, freq).ordinal
elif (is_integer(date) or is_float(date) or
(isinstance(date, (np.ndarray, Index)) and (date.size == 1))):
return date
elif date is None:
return None
raise ValueError("Unrecognizable date '%s'" % date)
def _dt_to_float_ordinal(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if (isinstance(dt, (np.ndarray, Index, Series)
) and is_datetime64_ns_dtype(dt)):
base = dates.epoch2num(dt.asi8 / 1.0E9)
else:
base = dates.date2num(dt)
return base
# Datetime Conversion
class DatetimeConverter(dates.DateConverter):
@staticmethod
def convert(values, unit, axis):
# values might be a 1-d array, or a list-like of arrays.
if is_nested_list_like(values):
values = [DatetimeConverter._convert_1d(v, unit, axis)
for v in values]
else:
values = DatetimeConverter._convert_1d(values, unit, axis)
return values
@staticmethod
def _convert_1d(values, unit, axis):
def try_parse(values):
try:
return _dt_to_float_ordinal(tools.to_datetime(values))
except Exception:
return values
if isinstance(values, (datetime, pydt.date)):
return _dt_to_float_ordinal(values)
elif isinstance(values, np.datetime64):
return _dt_to_float_ordinal(lib.Timestamp(values))
elif isinstance(values, pydt.time):
return dates.date2num(values)
elif (is_integer(values) or is_float(values)):
return values
elif isinstance(values, compat.string_types):
return try_parse(values)
elif isinstance(values, (list, tuple, np.ndarray, Index)):
if isinstance(values, Index):
values = values.values
if not isinstance(values, np.ndarray):
values = com._asarray_tuplesafe(values)
if is_integer_dtype(values) or is_float_dtype(values):
return values
try:
values = tools.to_datetime(values)
if isinstance(values, Index):
values = _dt_to_float_ordinal(values)
else:
values = [_dt_to_float_ordinal(x) for x in values]
except Exception:
values = _dt_to_float_ordinal(values)
return values
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = PandasAutoDateLocator(tz=tz)
majfmt = PandasAutoDateFormatter(majloc, tz=tz)
datemin = pydt.date(2000, 1, 1)
datemax = pydt.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
class PandasAutoDateFormatter(dates.AutoDateFormatter):
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
dates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt)
        # matplotlib.dates.UTC has no _utcoffset attribute, which pandas expects
if self._tz is dates.UTC:
self._tz._utcoffset = self._tz.utcoffset(None)
# For mpl > 2.0 the format strings are controlled via rcparams
# so do not mess with them. For mpl < 2.0 change the second
# break point and add a musec break point
if _mpl_le_2_0_0():
self.scaled[1. / SEC_PER_DAY] = '%H:%M:%S'
self.scaled[1. / MUSEC_PER_DAY] = '%H:%M:%S.%f'
class PandasAutoDateLocator(dates.AutoDateLocator):
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days
num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds
tot_sec = num_days * 86400. + num_sec
if abs(tot_sec) < self.minticks:
self._freq = -1
locator = MilliSecondLocator(self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
return dates.AutoDateLocator.get_locator(self, dmin, dmax)
def _get_unit(self):
return MilliSecondLocator.get_unit_generic(self._freq)
class MilliSecondLocator(dates.DateLocator):
UNIT = 1. / (24 * 3600 * 1000)
def __init__(self, tz):
dates.DateLocator.__init__(self, tz)
self._interval = 1.
def _get_unit(self):
return self.get_unit_generic(-1)
@staticmethod
def get_unit_generic(freq):
unit = dates.RRuleLocator.get_unit_generic(freq)
if unit < 0:
return MilliSecondLocator.UNIT
return unit
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm) unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
nmax, nmin = dates.date2num((dmax, dmin))
num = (nmax - nmin) * 86400 * 1000
max_millis_ticks = 6
for interval in [1, 10, 50, 100, 200, 500]:
if num <= interval * (max_millis_ticks - 1):
self._interval = interval
break
else:
            # went through the whole loop without breaking: default to 1000 ms
self._interval = 1000.
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
if estimate > self.MAXTICKS * 2:
raise RuntimeError(('MillisecondLocator estimated to generate %d '
'ticks from %s to %s: exceeds Locator.MAXTICKS'
'* 2 (%d) ') %
(estimate, dmin, dmax, self.MAXTICKS * 2))
freq = '%dL' % self._get_interval()
tz = self.tz.tzname(None)
st = _from_ordinal(dates.date2num(dmin)) # strip tz
ed = _from_ordinal(dates.date2num(dmax))
all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject
try:
if len(all_dates) > 0:
locs = self.raise_if_exceeds(dates.date2num(all_dates))
return locs
except Exception: # pragma: no cover
pass
lims = dates.date2num([dmin, dmax])
return lims
def _get_interval(self):
return self._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
dmin, dmax = self.datalim_to_dt()
vmin = dates.date2num(dmin)
vmax = dates.date2num(dmax)
return self.nonsingular(vmin, vmax)
def _from_ordinal(x, tz=None):
ix = int(x)
dt = datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
microsecond = int(1e6 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute),
int(second), microsecond)
if tz is not None:
dt = dt.astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
dt += timedelta(microseconds=1e6 - microsecond)
return dt
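# A small, hypothetical sketch of _from_ordinal(): it maps matplotlib's
# date2num float representation back to a datetime, assuming the proleptic
# Gregorian epoch used by the matplotlib versions this module targets.
def _example_from_ordinal():
    dt = datetime(2017, 5, 1, 12, 30, 45)
    x = dates.date2num(dt)        # float days
    roundtrip = _from_ordinal(x)  # equal to dt up to microsecond rounding
    return dt, roundtrip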
# Fixed frequency dynamic tick locators and formatters
# -------------------------------------------------------------------------
# --- Locators ---
# -------------------------------------------------------------------------
def _get_default_annual_spacing(nyears):
"""
Returns a default spacing between consecutive ticks for annual data.
"""
if nyears < 11:
(min_spacing, maj_spacing) = (1, 1)
elif nyears < 20:
(min_spacing, maj_spacing) = (1, 2)
elif nyears < 50:
(min_spacing, maj_spacing) = (1, 5)
elif nyears < 100:
(min_spacing, maj_spacing) = (5, 10)
elif nyears < 200:
(min_spacing, maj_spacing) = (5, 25)
elif nyears < 600:
(min_spacing, maj_spacing) = (10, 50)
else:
factor = nyears // 1000 + 1
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
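# For instance, _get_default_annual_spacing(30) returns (1, 5): minor ticks
# every year and major (labelled) ticks every fifth year.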
def period_break(dates, period):
"""
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor.
"""
current = getattr(dates, period)
previous = getattr(dates - 1, period)
return np.nonzero(current - previous)[0]
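# A small, hypothetical sketch of period_break() on a daily PeriodIndex that
# spans a month boundary; the construction mirrors the PeriodIndex(start=...,
# end=..., freq=...) call used later in _daily_finder.
def _example_period_break():
    dates_ = PeriodIndex(start='2017-01-30', end='2017-02-02', freq='D')
    return period_break(dates_, 'month')   # array([2]) -> 2017-02-01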
def has_level_label(label_flags, vmin):
"""
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
if the minimum view limit is not an exact integer, then the first tick
label won't be shown, so we must adjust for that.
"""
if label_flags.size == 0 or (label_flags.size == 1 and
label_flags[0] == 0 and
vmin % 1 > 0.0):
return False
else:
return True
def _daily_finder(vmin, vmax, freq):
periodsperday = -1
if freq >= FreqGroup.FR_HR:
if freq == FreqGroup.FR_NS:
periodsperday = 24 * 60 * 60 * 1000000000
elif freq == FreqGroup.FR_US:
periodsperday = 24 * 60 * 60 * 1000000
elif freq == FreqGroup.FR_MS:
periodsperday = 24 * 60 * 60 * 1000
elif freq == FreqGroup.FR_SEC:
periodsperday = 24 * 60 * 60
elif freq == FreqGroup.FR_MIN:
periodsperday = 24 * 60
elif freq == FreqGroup.FR_HR:
periodsperday = 24
else: # pragma: no cover
raise ValueError("unexpected frequency: %s" % freq)
periodsperyear = 365 * periodsperday
periodspermonth = 28 * periodsperday
elif freq == FreqGroup.FR_BUS:
periodsperyear = 261
periodspermonth = 19
elif freq == FreqGroup.FR_DAY:
periodsperyear = 365
periodspermonth = 28
elif frequencies.get_freq_group(freq) == FreqGroup.FR_WK:
periodsperyear = 52
periodspermonth = 3
else: # pragma: no cover
raise ValueError("unexpected frequency")
# save this for later usage
vmin_orig = vmin
(vmin, vmax) = (Period(ordinal=int(vmin), freq=freq),
Period(ordinal=int(vmax), freq=freq))
span = vmax.ordinal - vmin.ordinal + 1
dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq)
# Initialize the output
info = np.zeros(span,
dtype=[('val', np.int64), ('maj', bool),
('min', bool), ('fmt', '|S20')])
info['val'][:] = dates_._values
info['fmt'][:] = ''
info['maj'][[0, -1]] = True
# .. and set some shortcuts
info_maj = info['maj']
info_min = info['min']
info_fmt = info['fmt']
def first_label(label_flags):
if (label_flags[0] == 0) and (label_flags.size > 1) and \
((vmin_orig % 1) > 0.0):
return label_flags[1]
else:
return label_flags[0]
# Case 1. Less than a month
if span <= periodspermonth:
day_start = period_break(dates_, 'day')
month_start = period_break(dates_, 'month')
def _hour_finder(label_interval, force_year_start):
_hour = dates_.hour
_prev_hour = (dates_ - 1).hour
hour_start = (_hour - _prev_hour) != 0
info_maj[day_start] = True
info_min[hour_start & (_hour % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
if force_year_start and not has_level_label(year_start, vmin_orig):
info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y'
def _minute_finder(label_interval):
hour_start = period_break(dates_, 'hour')
_minute = dates_.minute
_prev_minute = (dates_ - 1).minute
minute_start = (_minute - _prev_minute) != 0
info_maj[hour_start] = True
info_min[minute_start & (_minute % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
def _second_finder(label_interval):
minute_start = period_break(dates_, 'minute')
_second = dates_.second
_prev_second = (dates_ - 1).second
second_start = (_second - _prev_second) != 0
info['maj'][minute_start] = True
info['min'][second_start & (_second % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[second_start & (_second %
label_interval == 0)] = '%H:%M:%S'
info_fmt[day_start] = '%H:%M:%S\n%d-%b'
info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y'
if span < periodsperday / 12000.0:
_second_finder(1)
elif span < periodsperday / 6000.0:
_second_finder(2)
elif span < periodsperday / 2400.0:
_second_finder(5)
elif span < periodsperday / 1200.0:
_second_finder(10)
elif span < periodsperday / 800.0:
_second_finder(15)
elif span < periodsperday / 400.0:
_second_finder(30)
elif span < periodsperday / 150.0:
_minute_finder(1)
elif span < periodsperday / 70.0:
_minute_finder(2)
elif span < periodsperday / 24.0:
_minute_finder(5)
elif span < periodsperday / 12.0:
_minute_finder(15)
elif span < periodsperday / 6.0:
_minute_finder(30)
elif span < periodsperday / 2.5:
_hour_finder(1, False)
elif span < periodsperday / 1.5:
_hour_finder(2, False)
elif span < periodsperday * 1.25:
_hour_finder(3, False)
elif span < periodsperday * 2.5:
_hour_finder(6, True)
elif span < periodsperday * 4:
_hour_finder(12, True)
else:
info_maj[month_start] = True
info_min[day_start] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[day_start] = '%d'
info_fmt[month_start] = '%d\n%b'
info_fmt[year_start] = '%d\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(day_start)] = '%d\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '%d\n%b\n%Y'
# Case 2. Less than three months
elif span <= periodsperyear // 4:
month_start = period_break(dates_, 'month')
info_maj[month_start] = True
if freq < FreqGroup.FR_HR:
info['min'] = True
else:
day_start = period_break(dates_, 'day')
info['min'][day_start] = True
week_start = period_break(dates_, 'week')
year_start = period_break(dates_, 'year')
info_fmt[week_start] = '%d'
info_fmt[month_start] = '\n\n%b'
info_fmt[year_start] = '\n\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(week_start)] = '\n\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '\n\n%b\n%Y'
# Case 3. Less than 14 months ...............
elif span <= 1.15 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
week_start = period_break(dates_, 'week')
info_maj[month_start] = True
info_min[week_start] = True
info_min[year_start] = False
info_min[month_start] = False
info_fmt[month_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
info_fmt[first_label(month_start)] = '%b\n%Y'
# Case 4. Less than 2.5 years ...............
elif span <= 2.5 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
month_start = period_break(dates_, 'month')
info_maj[quarter_start] = True
info_min[month_start] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
# Case 4. Less than 4 years .................
elif span <= 4 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
info_maj[year_start] = True
info_min[month_start] = True
info_min[year_start] = False
month_break = dates_[month_start].month
jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
# Case 5. Less than 11 years ................
elif span <= 11 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
info_maj[year_start] = True
info_min[quarter_start] = True
info_min[year_start] = False
info_fmt[year_start] = '%Y'
# Case 6. More than 12 years ................
else:
year_start = period_break(dates_, 'year')
year_break = dates_[year_start].year
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(year_break % maj_anndef == 0)]
info_maj[major_idx] = True
minor_idx = year_start[(year_break % min_anndef == 0)]
info_min[minor_idx] = True
info_fmt[major_idx] = '%Y'
return info
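# Note: every *_finder in this module returns the same structured array, one row per
# period in view: 'val' is the period ordinal, 'maj'/'min' flag major/minor ticks, and
# 'fmt' holds a strftime-like format string (empty string means no label).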
def _monthly_finder(vmin, vmax, freq):
periodsperyear = 12
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
# Initialize the output
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
dates_ = info['val']
info['fmt'] = ''
year_start = (dates_ % 12 == 0).nonzero()[0]
info_maj = info['maj']
info_fmt = info['fmt']
if span <= 1.15 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = '%b\n%Y'
elif span <= 2.5 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
# TODO: Check the following : is it really info['fmt'] ?
info['fmt'][quarter_start] = True
info['min'] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 4 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 11 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
info['min'][quarter_start] = True
info_fmt[year_start] = '%Y'
else:
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
years = dates_[year_start] // 12 + 1
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%Y'
return info
def _quarterly_finder(vmin, vmax, freq):
periodsperyear = 4
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
info_maj = info['maj']
info_fmt = info['fmt']
year_start = (dates_ % 4 == 0).nonzero()[0]
if span <= 3.5 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = 'Q%q'
info_fmt[year_start] = 'Q%q\n%F'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = 'Q%q\n%F'
elif span <= 11 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[year_start] = '%F'
else:
years = dates_[year_start] // 4 + 1
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%F'
return info
def _annual_finder(vmin, vmax, freq):
(vmin, vmax) = (int(vmin), int(vmax + 1))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
(min_anndef, maj_anndef) = _get_default_annual_spacing(span)
major_idx = dates_ % maj_anndef == 0
info['maj'][major_idx] = True
info['min'][(dates_ % min_anndef == 0)] = True
info['fmt'][major_idx] = '%Y'
return info
def get_finder(freq):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
fgroup = frequencies.get_freq_group(freq)
if fgroup == FreqGroup.FR_ANN:
return _annual_finder
elif fgroup == FreqGroup.FR_QTR:
return _quarterly_finder
elif freq == FreqGroup.FR_MTH:
return _monthly_finder
elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK):
return _daily_finder
else: # pragma: no cover
errmsg = "Unsupported frequency: %s" % (freq)
raise NotImplementedError(errmsg)
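# Illustrative mapping, assuming standard pandas frequency strings:
#   get_finder('A') -> _annual_finder     get_finder('Q') -> _quarterly_finder
#   get_finder('M') -> _monthly_finder    get_finder('D') -> _daily_finder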
class TimeSeries_DateLocator(Locator):
"""
Locates the ticks along an axis controlled by a :class:`Series`.
Parameters
----------
freq : {var}
Valid frequency specifier.
minor_locator : {False, True}, optional
Whether the locator is for minor ticks (True) or not.
dynamic_mode : {True, False}, optional
Whether the locator should work in dynamic mode.
base : {int}, optional
quarter : {int}, optional
month : {int}, optional
day : {int}, optional
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
base=1, quarter=1, month=1, day=1, plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.freq = freq
self.base = base
(self.quarter, self.month, self.day) = (quarter, month, day)
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val'])
def __call__(self):
'Return the locations of the ticks.'
# axis calls Locator.set_axis inside set_m<xxxx>_formatter
vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
vmin, vmax = vi
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.isdynamic:
locs = self._get_default_locs(vmin, vmax)
else: # pragma: no cover
base = self.base
(d, m) = divmod(vmin, base)
vmin = (d + 1) * base
locs = lrange(vmin, vmax + 1, base)
return locs
def autoscale(self):
"""
Sets the view limits to the nearest multiples of base that contain the
data.
"""
# requires matplotlib >= 0.98.0
(vmin, vmax) = self.axis.get_data_interval()
locs = self._get_default_locs(vmin, vmax)
(vmin, vmax) = locs[[0, -1]]
if vmin == vmax:
vmin -= 1
vmax += 1
return nonsingular(vmin, vmax)
# -------------------------------------------------------------------------
# --- Formatter ---
# -------------------------------------------------------------------------
class TimeSeries_DateFormatter(Formatter):
"""
Formats the ticks along an axis controlled by a :class:`PeriodIndex`.
Parameters
----------
freq : {int, string}
Valid frequency specifier.
minor_locator : {False, True}
Whether the current formatter should apply to minor ticks (True) or
major ticks (False).
dynamic_mode : {True, False}
Whether the formatter works in dynamic mode or not.
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.format = None
self.freq = freq
self.locs = []
self.formatdict = None
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
    def _set_default_format(self, vmin, vmax):
        "Sets and returns the default tick format dictionary."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']),
info)
else:
format = np.compress(info['maj'], info)
self.formatdict = dict([(x, f) for (x, _, _, f) in format])
return self.formatdict
def set_locs(self, locs):
'Sets the locations of the ticks'
# don't actually use the locs. This is just needed to work with
# matplotlib. Force to use vmin, vmax
self.locs = locs
(vmin, vmax) = vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
if vmax < vmin:
(vmin, vmax) = (vmax, vmin)
self._set_default_format(vmin, vmax)
def __call__(self, x, pos=0):
if self.formatdict is None:
return ''
else:
fmt = self.formatdict.pop(x, '')
return Period(ordinal=int(x), freq=self.freq).strftime(fmt)
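# Illustrative (hypothetical) wiring of the locator/formatter pair onto a matplotlib
# axis; ``plot_obj`` is assumed to expose ``date_axis_info`` and ``view_interval``
# attributes, as the pandas plotting machinery does:
#
#   ax.xaxis.set_major_locator(
#       TimeSeries_DateLocator('D', dynamic_mode=True, plot_obj=plot_obj))
#   ax.xaxis.set_major_formatter(
#       TimeSeries_DateFormatter('D', dynamic_mode=True, plot_obj=plot_obj))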
class TimeSeries_TimedeltaFormatter(Formatter):
"""
Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`.
"""
@staticmethod
def format_timedelta_ticks(x, pos, n_decimals):
"""
Convert seconds to 'D days HH:MM:SS.F'
"""
s, ns = divmod(x, 1e9)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
decimals = int(ns * 10**(n_decimals - 9))
s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s))
if n_decimals > 0:
s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals)
if d != 0:
s = '{:d} days '.format(int(d)) + s
return s
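    # Worked examples of the conversion above (x is in nanoseconds):
    #   format_timedelta_ticks(90061 * 1e9, 0, 0)  -> '1 days 01:01:01'
    #   format_timedelta_ticks(3723.5 * 1e9, 0, 2) -> '01:02:03.50'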
def __call__(self, x, pos=0):
(vmin, vmax) = tuple(self.axis.get_view_interval())
n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin))))
if n_decimals > 9:
n_decimals = 9
return self.format_timedelta_ticks(x, pos, n_decimals)
| mit |
bgris/ODL_bgris | lib/python3.5/site-packages/odl/test/test_doc.py | 2 | 2975 | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Run all doctests in the online documentation.
Running this file causes all relevant files in the online documentation to
be run by ``doctest``, and any exception will give a FAILED result.
All other results are considered PASSED.
This test file assumes that all dependencies are installed.
"""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import doctest
from doctest import IGNORE_EXCEPTION_DETAIL, ELLIPSIS, NORMALIZE_WHITESPACE
import os
import pytest
try:
import matplotlib
matplotlib.use('Agg') # To avoid the backend freezing
import matplotlib.pyplot as plt
except ImportError:
pass
# Modules to be added to testing globals
import numpy
import scipy
import odl
try:
import proximal
except ImportError:
proximal = None
doctest_extraglobs = {'odl': odl, 'np': numpy, 'scipy': scipy,
'proximal': proximal}
root_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'../../doc/source')
root_dir = os.path.normpath(root_dir)
test_dirs = ['guide', 'getting_started']
test_suffixes = ['.rst', '.py']
exclude_files = ['faq.rst']
doc_src_files = []
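# Doctest flags used below: NORMALIZE_WHITESPACE collapses whitespace differences,
# ELLIPSIS lets '...' in expected output match arbitrary text, and
# IGNORE_EXCEPTION_DETAIL compares only the exception type, not its message.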
doctest_optionflags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
for test_dir in test_dirs:
for path, _, filenames in os.walk(os.path.join(root_dir, test_dir)):
for filename in filenames:
if (any(filename.endswith(suffix) for suffix in test_suffixes) and
filename not in exclude_files):
doc_src_files.append(os.path.join(path, filename))
@pytest.fixture(scope="module", ids=doc_src_files, params=doc_src_files)
def doc_src_file(request):
return request.param
@pytest.mark.skipif("not pytest.config.getoption('--doctest-doc')",
reason='Need --doctest-doc option to run')
def test_file(doc_src_file):
doctest.testfile(doc_src_file, module_relative=False, report=True,
extraglobs=doctest_extraglobs, verbose=True,
optionflags=doctest_optionflags)
plt.close('all')
if __name__ == '__main__':
pytest.main([str(__file__.replace('\\', '/')), '-v', '--doctest-doc'])
| gpl-3.0 |
Adai0808/scikit-learn | sklearn/tests/test_multiclass.py | 136 | 23649 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: tests is the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs and thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
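    # Illustrative tie-break (hypothetical numbers): if a sample receives one vote per
    # class, votes = [1, 1, 1], adding normalized confidences such as [-0.1, 0.3, -0.2]
    # gives decision values [0.9, 1.3, 0.8], so class 1 wins the tie.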
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
loicNorgeot/THE_LONG_DARK | TLD_mapping_tool.py | 1 | 7804 |
# coding: utf-8
## The Long Dark Mapping Tool
# The MIT License (MIT)
# Copyright (c) 2015 Loïc Norgeot
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
### 1 - Map-related functions
#### 1.0 - Module imports
# In[1]:
import os
import numpy as np
import matplotlib.pyplot as plt
from numpy import linspace, meshgrid
from matplotlib.mlab import griddata
from pylab import rcParams
rcParams['figure.figsize'] = 12,8
#import scipy.ndimage as nd
#### 1.1 - Screenshots and savefiles manipulation
# In[2]:
def readCoordsFromScreenshots(path):
screenshots = os.listdir(path)
screenshots = [S for S in screenshots if "screen" in S]
coords = np.array([[int(x) for x in s[s.find("(")+1:s.find(")")].split(",")] for s in screenshots])
return coords
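# Illustrative example (hypothetical filename): a screenshot named
# 'screen_(443,72,-1201).png' contributes the coordinate triple [443, 72, -1201].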
def readCoordsFromFile(fileName):
C = []
with open(fileName) as f:
content = f.readlines()
for c in content:
s = c.split(" ")
C.append([int(s[0]), int(s[2]), int(s[1])])
return np.array(C)
def writeCoordsToFile(data, fileName, mode="w"):
with open(fileName, mode) as f:
for c in data:
f.write(str(c[0]) + " " + str(c[2]) + " " + str(c[1]) +"\n" )
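# Note: the file stores each point as "x z y" (columns 0, 2, 1) and readCoordsFromFile
# swaps them back, so the two functions round-trip the in-memory [x, y, z] ordering
# used by the plotting code above.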
def deleteScreenshots(path):
for fileName in os.listdir(path):
if((".png" in fileName) and ("screen" in fileName)):
os.remove(path + fileName)
#### 1.2 - Plotting
# In[3]:
def contourPlot(data, path, save=True):
fig = plt.figure()
xi = linspace(min(data[:,0]),max(data[:,0]),111)
yi = linspace(min(data[:,2]),max(data[:,2]),111)
zi = griddata(data[:,0],data[:,2],data[:,1], xi,yi, interp='linear')
#zi = nd.gaussian_filter(zi, sigma=0.6, order=0)
plt.contour (xi,yi,zi,41,linewidths=1,colors='black')
plt.contourf(xi,yi,zi,82);
plt.colorbar()
plt.grid(True)
plt.set_cmap('terrain')
if(save):
plt.savefig(path + "TM_map_contour.png",dpi=150)
def scatterPlot(data, path, save=True):
fig = plt.figure()
plt.scatter(data[:,0],data[:,2], c=data[:,1], linewidth=0,s=40)
plt.xlim(min(data[:,0]),max(data[:,0]))
plt.ylim(min(data[:,2]),max(data[:,2]))
plt.colorbar()
plt.grid(True)
plt.set_cmap('terrain')
if(save):
plt.savefig(path + "TM_map_path.png",dpi=150)
#### 1.3 - User routines
# In[4]:
def createMaps(sPath, fPath):
fC = readCoordsFromFile(fPath + "coords.txt")
sC = readCoordsFromScreenshots(sPath)
coordinates = np.array([]);
if( (len(fC)==0) and (len(sC)==0)):
print("No data to work on! Doing nothing...")
elif( len(fC)==0 ):
print("No files, but screenshots, going on...")
coordinates=sC
        writeCoordsToFile(coordinates, fPath + "coords.txt")
deleteScreenshots(sPath)
elif( len(sC)==0 ):
print("No screenshots, but files, going on...")
coordinates=fC
else:
print("Screenshots and files! Going on...")
coordinates = np.concatenate((fC, sC))
writeCoordsToFile(coordinates, fPath+ "coords.txt")
deleteScreenshots(sPath)
contourPlot(coordinates, fPath)
scatterPlot(coordinates, fPath)
def checkFile(fileName):
fC = readCoordsFromFile(fileName)
coordinates = np.array([]);
if( (len(fC)==0)):
print("No data to work on! Doing nothing...")
else:
print("No screenshots, but a file, going on...")
        coordinates = fC
        print("Number of points in the file = " + str(len(coordinates)))
contourPlot(coordinates, " ", save=False)
scatterPlot(coordinates, " ", save=False)
### 2 - Interactive mapping functions
#### 2.0 - Imports
# In[5]:
from subprocess import check_output
from win32api import keybd_event, GetAsyncKeyState
import time
#### 2.1 - Functions
# In[6]:
def isTLDRunning():
processes_string = check_output("wmic process get description", shell=True)
return ('tld.exe' in processes_string.split())
def press(key=0x77):
keybd_event(key, 0,1,0)
time.sleep(.05)
keybd_event(key, 0,2,0)
def wasPressed(key=0x76):
return GetAsyncKeyState(key)
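# The key codes used here are Windows virtual-key codes: 0x76 is F7 (toggles recording
# below) and 0x77 is F8, pressed programmatically as what is presumably the in-game
# screenshot key that produces the coordinate-stamped files parsed above.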
def startInteractiveMapping(sPath, fPath, time_step=2.5):
t = time.time()
recording = False
while(isTLDRunning()):
        if(wasPressed(0x76)):
            recording = not recording
if(recording):
if(time.time() - t > time_step):
press(0x77)
t = time.time()
coord = readCoordsFromScreenshots(sPath)
writeCoordsToFile(coord, fPath + "coords.txt", "a")
deleteScreenshots(sPath)
time.sleep(0.2)
deleteScreenshots(sPath)
### 3 - GUI
# In[7]:
import Tkinter, tkFileDialog, Tkconstants
class TLD_Mapping_tool_tk(Tkinter.Tk):
mPath=""
sPath=""
def __init__(self,parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
self.initialize()
def initialize(self):
self.grid()
maps_bt = Tkinter.Button(self, text='Choose maps directory', command=self.chooseMapsDir)
maps_bt.grid(column=0,row=0,columnspan=2,sticky='EW')
screenshots_bt = Tkinter.Button(self, text='Choose screenshots directory', command=self.chooseScreenDir)
screenshots_bt.grid(column=0,row=1,columnspan=2,sticky='EW')
self.run_bt = Tkinter.Button(self,
text = u"Start mapping",
state = 'disabled',
command = lambda: startInteractiveMapping(self.sPath, self.mPath))
self.run_bt.grid(column=0,row=2)
self.createmap_bt = Tkinter.Button(self,
text = u"Create maps",
state = 'disabled',
command = lambda: createMaps(self.sPath, self.mPath))
self.createmap_bt.grid(column=1,row=2)
self.grid_columnconfigure(0,weight=1)
self.resizable(False,False)
def enableButtons(self):
if self.sPath!="" and self.mPath!="":
self.run_bt['state'] = 'normal'
self.createmap_bt['state'] = 'normal'
def chooseScreenDir(self):
self.sPath = tkFileDialog.askdirectory() + "/"
self.enableButtons()
def chooseMapsDir(self):
self.mPath = tkFileDialog.askdirectory() + "/"
self.enableButtons()
### 4 - Execution
# In[8]:
if __name__ == "__main__":
app = TLD_Mapping_tool_tk(None)
app.title('TLD Mapping Tool')
app.mainloop()
| mit |
f3r/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
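# Illustrative example: number_aware_tokenizer("win 10 games in 2014") returns
# ['win', '#NUMBER', 'games', 'in', '#NUMBER']; single-character tokens are dropped by
# the two-character token pattern.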
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
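# In words: the returned ratio is the total weight of entries linking the bicluster's
# rows to out-of-cluster columns (and its columns to out-of-cluster rows) divided by
# the weight inside the bicluster, so lower values indicate better-separated biclusters.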
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
michaelaye/scikit-image | doc/examples/plot_rag.py | 25 | 2139 | """
=======================
Region Adjacency Graphs
=======================
This example demonstrates the use of the `merge_nodes` function of a Region
Adjacency Graph (RAG). The `RAG` class represents an undirected weighted graph
which inherits from the `networkx.Graph` class. When a new node is formed by
merging two nodes, the edge weight of all the edges incident on the resulting
node can be updated by a user defined function `weight_func`.
The default behaviour is to use the smaller edge weight in case of a conflict.
The example below also shows how to use a custom function to select the larger
weight instead.
"""
from skimage.future.graph import rag
import networkx as nx
from matplotlib import pyplot as plt
import numpy as np
def max_edge(g, src, dst, n):
"""Callback to handle merging nodes by choosing maximum weight.
Returns either the weight between (`src`, `n`) or (`dst`, `n`)
in `g` or the maximum of the two when both exist.
Parameters
----------
g : RAG
The graph under consideration.
src, dst : int
The vertices in `g` to be merged.
n : int
A neighbor of `src` or `dst` or both.
Returns
-------
weight : float
The weight between (`src`, `n`) or (`dst`, `n`) in `g` or the
maximum of the two when both exist.
"""
w1 = g[n].get(src, {'weight': -np.inf})['weight']
w2 = g[n].get(dst, {'weight': -np.inf})['weight']
return max(w1, w2)
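# Illustrative example: if `n` is linked to `src` with weight 10 and to `dst` with
# weight 30, the merged node keeps weight 30; a missing edge defaults to -inf, so a
# single existing edge simply keeps its own weight.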
def display(g, title):
"""Displays a graph with the given title."""
pos = nx.circular_layout(g)
plt.figure()
plt.title(title)
nx.draw(g, pos)
nx.draw_networkx_edge_labels(g, pos, font_size=20)
g = rag.RAG()
g.add_edge(1, 2, weight=10)
g.add_edge(2, 3, weight=20)
g.add_edge(3, 4, weight=30)
g.add_edge(4, 1, weight=40)
g.add_edge(1, 3, weight=50)
# Assigning dummy labels.
for n in g.nodes():
g.node[n]['labels'] = [n]
gc = g.copy()
display(g, "Original Graph")
g.merge_nodes(1, 3)
display(g, "Merged with default (min)")
gc.merge_nodes(1, 3, weight_func=max_edge, in_place=False)
display(gc, "Merged with max without in_place")
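# Expected weights for this toy graph, per the merge rules described above: the merged
# node {1, 3} touches node 2 with weight min(10, 20) = 10 and node 4 with weight
# min(30, 40) = 30 under the default rule, versus 20 and 40 with `max_edge`.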
plt.show()
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
yaukwankiu/armor | geometry/granulometry.py | 1 | 2954 | # http://en.wikipedia.org/wiki/Granulometry_%28morphology%29
# http://scipy-lectures.github.io/advanced/image_processing/
# http://scipy-lectures.github.io/advanced/image_processing/auto_examples/plot_granulo.html#example-plot-granulo-py
#
from scipy import ndimage
import matplotlib.pyplot as plt
import numpy as np
import time
from .. import defaultParameters as dp
def disk_structure(n):
struct = np.zeros((2 * n + 1, 2 * n + 1))
x, y = np.indices((2 * n + 1, 2 * n + 1))
mask = (x - n)**2 + (y - n)**2 <= n**2
struct[mask] = 1
return struct.astype(np.bool)
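# Illustrative example: disk_structure(1) is the 3x3 "plus"-shaped element
#   [[False,  True, False],
#    [ True,  True,  True],
#    [False,  True, False]]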
def granulometry(data, sizes=None):
s = max(data.shape)
    if sizes is None:
sizes = range(1, s/2, 2)
granulo = [ndimage.binary_opening(data, \
structure=disk_structure(n)).sum() for n in sizes]
return granulo
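# Each entry granulo[i] counts the pixels that survive a binary opening with a disk of
# radius sizes[i]; the sequence is essentially non-increasing, and a sharp drop between
# two radii indicates many objects whose characteristic size lies between them.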
def analyse(im, threshold="", scales=[4,10,14,40], verbose=True,display=True, outputFolder=""):
if threshold != "":
try:
mask = (im>threshold)
except:
im = im.matrix
mask = (im>threshold)
else:
try:
mask = im > im.mean()
except:
im = im.matrix
mask = im > im.mean()
#granulo = granulometry(mask, sizes=np.arange(2, 19, 4))
granulo = granulometry(mask, sizes=scales)
print 'granulo:', granulo
plt.figure(figsize=(6, 2.2))
plt.subplot(121)
plt.imshow(mask, cmap=plt.cm.gray, origin='lower')
openedList = [0] * len(scales)
for i, s in enumerate(scales):
openedList[i] = ndimage.binary_opening(mask, structure=disk_structure(s))
if len(scales)==4:
plt.contour(openedList[0], [0.5], colors='g', linewidths=1)
plt.contour(openedList[1], [0.5], colors='b', linewidths=1)
plt.contour(openedList[2], [0.5], colors='r', linewidths=1)
plt.contour(openedList[3], [0.5], colors='k', linewidths=1)
else:
for i in range(len(scales)):
plt.contour(openedList[i], [0.5], colors=dp.coloursList[i], linewidths=1)
plt.axis('off')
plt.subplot(122)
plt.plot(scales, granulo, 'ok', ms=8)
plt.subplots_adjust(wspace=0.02, hspace=0.15, top=0.95, bottom=0.15, left=0, right=0.95)
#################################
# maximise frame window
# http://stackoverflow.com/questions/12439588/how-to-maximize-a-plt-show-window-using-python
# how?
#
#################################
if outputFolder!="":
plt.savefig(outputFolder+ str(time.time())+ str(scales)+'granulometry.png', dpi=200)
if display:
plt.show(block=False)
return openedList
def main():
np.random.seed(1)
n = 10
l = 256
im = np.zeros((l, l))
points = l*np.random.random((2, n**2))
im[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
im = ndimage.gaussian_filter(im, sigma=l/(4.*n))
analyse(im)
if __name__ == '__main__':
main()
| cc0-1.0 |
gtrichards/PHYS_T480 | code/fig_central_limit.py | 4 | 2845 | r"""
Example of central limit theorem
--------------------------------
Figure 3.20.
An illustration of the central limit theorem. The histogram in each panel shows
the distribution of the mean value of N random variables drawn from the (0, 1)
range (a uniform distribution with :math:`\mu = 0.5` and W = 1; see eq. 3.39).
The distribution for N = 2 has a triangular shape and as N increases it becomes
increasingly similar to a Gaussian, in agreement with the central limit
theorem. The predicted normal distribution with :math:`\mu = 0.5` and
:math:`\sigma = 1/ \sqrt{12 N}` is shown by the line. Already for N = 10,
the "observed" distribution is essentially the same as the predicted
distribution.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Generate the uniform samples
N = [2, 3, 10]
np.random.seed(42)
x = np.random.random((max(N), int(1E6)))
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(hspace=0.05)
for i in range(len(N)):
ax = fig.add_subplot(3, 1, i + 1)
# take the mean of the first N[i] samples
x_i = x[:N[i], :].mean(0)
# histogram the data
ax.hist(x_i, bins=np.linspace(0, 1, 101),
histtype='stepfilled', alpha=0.5, normed=True)
# plot the expected gaussian pdf
mu = 0.5
sigma = 1. / np.sqrt(12 * N[i])
dist = norm(mu, sigma)
x_pdf = np.linspace(-0.5, 1.5, 1000)
ax.plot(x_pdf, dist.pdf(x_pdf), '-k')
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.001, None)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
ax.text(0.99, 0.95, r"$N = %i$" % N[i],
ha='right', va='top', transform=ax.transAxes)
if i == len(N) - 1:
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.4f'))
ax.set_xlabel(r'$x$')
else:
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylabel('$p(x)$')
plt.show()
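# A minimal numeric check of the prediction above (a sketch, not part of the
# published figure code): the standard deviation of the means of ``n_vars``
# uniform variables should be close to 1 / sqrt(12 * n_vars).
def _check_clt_prediction(n_vars=10, n_trials=100000, seed=0):
    rng = np.random.RandomState(seed)
    means = rng.random_sample((n_vars, n_trials)).mean(axis=0)
    return means.std(), 1.0 / np.sqrt(12 * n_vars)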
| mit |
paladin74/neural-network-animation | matplotlib/figure.py | 10 | 58719 | """
The figure module provides the top-level
:class:`~matplotlib.artist.Artist`, the :class:`Figure`, which
contains all the plot elements. The following classes are defined
:class:`SubplotParams`
control the default spacing of the subplots
:class:`Figure`
top level container for all plot elements
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
from operator import itemgetter
import numpy as np
from matplotlib import rcParams
from matplotlib import docstring
from matplotlib import __version__ as _mpl_version
import matplotlib.artist as martist
from matplotlib.artist import Artist, allow_rasterization
import matplotlib.cbook as cbook
from matplotlib.cbook import Stack, iterable
from matplotlib import _image
from matplotlib.image import FigureImage
import matplotlib.colorbar as cbar
from matplotlib.axes import Axes, SubplotBase, subplot_class_factory
from matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput
from matplotlib.legend import Legend
from matplotlib.patches import Rectangle
from matplotlib.projections import (get_projection_names,
process_projection_requirements)
from matplotlib.text import Text, _process_text_args
from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,
TransformedBbox)
from matplotlib.backend_bases import NonGuiException
docstring.interpd.update(projection_names=get_projection_names())
class AxesStack(Stack):
"""
Specialization of the Stack to handle all tracking of Axes in a Figure.
This stack stores ``key, (ind, axes)`` pairs, where:
* **key** should be a hash of the args and kwargs
used in generating the Axes.
* **ind** is a serial number for tracking the order
in which axes were added.
The AxesStack is a callable, where ``ax_stack()`` returns
the current axes. Alternatively the :meth:`current_key_axes` will
return the current key and associated axes.
"""
def __init__(self):
Stack.__init__(self)
self._ind = 0
def as_list(self):
"""
Return a list of the Axes instances that have been added to the figure
"""
ia_list = [a for k, a in self._elements]
ia_list.sort()
return [a for i, a in ia_list]
def get(self, key):
"""
Return the Axes instance that was added with *key*.
If it is not present, return None.
"""
item = dict(self._elements).get(key)
if item is None:
return None
return item[1]
def _entry_from_axes(self, e):
ind, k = dict([(a, (ind, k)) for (k, (ind, a)) in self._elements])[e]
return (k, (ind, e))
def remove(self, a):
"""Remove the axes from the stack."""
Stack.remove(self, self._entry_from_axes(a))
def bubble(self, a):
"""
Move the given axes, which must already exist in the
stack, to the top.
"""
return Stack.bubble(self, self._entry_from_axes(a))
def add(self, key, a):
"""
Add Axes *a*, with key *key*, to the stack, and return the stack.
If *a* is already on the stack, don't add it again, but
return *None*.
"""
# All the error checking may be unnecessary; but this method
# is called so seldom that the overhead is negligible.
if not isinstance(a, Axes):
raise ValueError("second argument, %s, is not an Axes" % a)
try:
hash(key)
except TypeError:
raise ValueError("first argument, %s, is not a valid key" % key)
a_existing = self.get(key)
if a_existing is not None:
Stack.remove(self, (key, a_existing))
warnings.warn(
"key %s already existed; Axes is being replaced" % key)
# I don't think the above should ever happen.
if a in self:
return None
self._ind += 1
return Stack.push(self, (key, (self._ind, a)))
def current_key_axes(self):
"""
Return a tuple of ``(key, axes)`` for the active axes.
If no axes exists on the stack, then returns ``(None, None)``.
"""
if not len(self._elements):
return self._default, self._default
else:
key, (index, axes) = self._elements[self._pos]
return key, axes
def __call__(self):
return self.current_key_axes()[1]
def __contains__(self, a):
return a in self.as_list()
class SubplotParams:
"""
A class to hold the parameters for a subplot
"""
def __init__(self, left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
All dimensions are fraction of the figure width or height.
All values default to their rc params
The following attributes are available
*left* : 0.125
The left side of the subplots of the figure
*right* : 0.9
The right side of the subplots of the figure
*bottom* : 0.1
The bottom of the subplots of the figure
*top* : 0.9
The top of the subplots of the figure
*wspace* : 0.2
The amount of width reserved for blank space between subplots
*hspace* : 0.2
The amount of height reserved for white space between subplots
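        Example (a minimal sketch)::
            pars = SubplotParams(left=0.2, right=0.95, wspace=0.3)
            fig = Figure(subplotpars=pars)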
"""
self.validate = True
self.update(left, bottom, right, top, wspace, hspace)
def update(self, left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
Update the current values. If any kwarg is None, default to
the current value, if set, otherwise to rc
"""
thisleft = getattr(self, 'left', None)
thisright = getattr(self, 'right', None)
thistop = getattr(self, 'top', None)
thisbottom = getattr(self, 'bottom', None)
thiswspace = getattr(self, 'wspace', None)
thishspace = getattr(self, 'hspace', None)
self._update_this('left', left)
self._update_this('right', right)
self._update_this('bottom', bottom)
self._update_this('top', top)
self._update_this('wspace', wspace)
self._update_this('hspace', hspace)
def reset():
self.left = thisleft
self.right = thisright
self.top = thistop
self.bottom = thisbottom
self.wspace = thiswspace
self.hspace = thishspace
if self.validate:
if self.left >= self.right:
reset()
raise ValueError('left cannot be >= right')
if self.bottom >= self.top:
reset()
raise ValueError('bottom cannot be >= top')
def _update_this(self, s, val):
if val is None:
val = getattr(self, s, None)
if val is None:
key = 'figure.subplot.' + s
val = rcParams[key]
setattr(self, s, val)
class Figure(Artist):
"""
The Figure instance supports callbacks through a *callbacks*
attribute which is a :class:`matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'dpi_changed', and
the callback will be called with ``func(fig)`` where fig is the
:class:`Figure` instance.
*patch*
The figure patch is drawn by a
:class:`matplotlib.patches.Rectangle` instance
*suppressComposite*
For multiple figure images, the figure will make composite
images depending on the renderer option_image_nocomposite
function. If suppressComposite is True|False, this will
override the renderer.
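    Example of connecting to the 'dpi_changed' event (a minimal sketch)::
        def on_dpi_change(fig):
            print('figure dpi is now', fig.dpi)
        fig.callbacks.connect('dpi_changed', on_dpi_change)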
"""
def __str__(self):
return "Figure(%gx%g)" % tuple(self.bbox.size)
def __init__(self,
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
linewidth=0.0, # the default linewidth of the frame
frameon=None, # whether or not to draw the figure frame
subplotpars=None, # default to rc
tight_layout=None, # default to rc figure.autolayout
):
"""
*figsize*
w,h tuple in inches
*dpi*
Dots per inch
*facecolor*
The figure patch facecolor; defaults to rc ``figure.facecolor``
*edgecolor*
The figure patch edge color; defaults to rc ``figure.edgecolor``
*linewidth*
The figure patch edge linewidth; the default linewidth of the frame
*frameon*
If *False*, suppress drawing the figure frame
*subplotpars*
A :class:`SubplotParams` instance, defaults to rc
*tight_layout*
If *False* use *subplotpars*; if *True* adjust subplot
parameters using :meth:`tight_layout` with default padding.
When providing a dict containing the keys `pad`, `w_pad`, `h_pad`
and `rect`, the default :meth:`tight_layout` paddings will be
overridden.
Defaults to rc ``figure.autolayout``.
"""
Artist.__init__(self)
self.callbacks = cbook.CallbackRegistry()
if figsize is None:
figsize = rcParams['figure.figsize']
if dpi is None:
dpi = rcParams['figure.dpi']
if facecolor is None:
facecolor = rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = rcParams['figure.edgecolor']
if frameon is None:
frameon = rcParams['figure.frameon']
self.dpi_scale_trans = Affine2D()
self.dpi = dpi
self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)
self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)
self.frameon = frameon
self.transFigure = BboxTransformTo(self.bbox)
# the figurePatch name is deprecated
self.patch = self.figurePatch = Rectangle(
xy=(0, 0), width=1, height=1,
facecolor=facecolor, edgecolor=edgecolor,
linewidth=linewidth)
self._set_artist_props(self.patch)
self.patch.set_aa(False)
self._hold = rcParams['axes.hold']
self.canvas = None
self._suptitle = None
if subplotpars is None:
subplotpars = SubplotParams()
self.subplotpars = subplotpars
self.set_tight_layout(tight_layout)
self._axstack = AxesStack() # track all figure axes and current axes
self.clf()
self._cachedRenderer = None
# TODO: I'd like to dynamically add the _repr_html_ method
# to the figure in the right context, but then IPython doesn't
# use it, for some reason.
def _repr_html_(self):
# We can't use "isinstance" here, because then we'd end up importing
        # webagg unconditionally.
if (self.canvas is not None and
'WebAgg' in self.canvas.__class__.__name__):
from matplotlib.backends import backend_webagg
return backend_webagg.ipython_inline_display(self)
def show(self, warn=True):
"""
If using a GUI backend with pyplot, display the figure window.
If the figure was not created using
:func:`~matplotlib.pyplot.figure`, it will lack a
:class:`~matplotlib.backend_bases.FigureManagerBase`, and
will raise an AttributeError.
For non-GUI backends, this does nothing, in which case
a warning will be issued if *warn* is True (default).
"""
try:
manager = getattr(self.canvas, 'manager')
except AttributeError as err:
raise AttributeError("%s\n"
"Figure.show works only "
"for figures managed by pyplot, normally "
"created by pyplot.figure()." % err)
if manager is not None:
try:
manager.show()
return
except NonGuiException:
pass
if warn:
import warnings
warnings.warn(
"matplotlib is currently using a non-GUI backend, "
"so cannot show the figure")
def _get_axes(self):
return self._axstack.as_list()
axes = property(fget=_get_axes, doc="Read-only: list of axes in Figure")
def _get_dpi(self):
return self._dpi
def _set_dpi(self, dpi):
self._dpi = dpi
self.dpi_scale_trans.clear().scale(dpi, dpi)
self.callbacks.process('dpi_changed', self)
dpi = property(_get_dpi, _set_dpi)
def get_tight_layout(self):
"""
        Return the Boolean flag, True to use :meth:`tight_layout` when drawing.
"""
return self._tight
def set_tight_layout(self, tight):
"""
Set whether :meth:`tight_layout` is used upon drawing.
        If None, the rcParams['figure.autolayout'] value will be used.
When providing a dict containing the keys `pad`, `w_pad`, `h_pad`
and `rect`, the default :meth:`tight_layout` paddings will be
overridden.
ACCEPTS: [True | False | dict | None ]
"""
if tight is None:
tight = rcParams['figure.autolayout']
self._tight = bool(tight)
self._tight_parameters = tight if isinstance(tight, dict) else {}
def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right'):
"""
Date ticklabels often overlap, so it is useful to rotate them
and right align them. Also, a common use case is a number of
subplots with shared xaxes where the x-axis is date data. The
ticklabels are often long, and it helps to rotate them on the
bottom subplot and turn them off on other subplots, as well as
turn off xlabels.
*bottom*
The bottom of the subplots for :meth:`subplots_adjust`
*rotation*
The rotation of the xtick labels
*ha*
The horizontal alignment of the xticklabels
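        Example (a minimal sketch; ``dates`` and ``values`` are placeholder
        sequences, not defined here)::
            fig, ax = plt.subplots()
            ax.plot_date(dates, values)
            fig.autofmt_xdate(bottom=0.2, rotation=30, ha='right')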
"""
allsubplots = np.alltrue([hasattr(ax, 'is_last_row') for ax
in self.axes])
if len(self.axes) == 1:
for label in self.axes[0].get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
else:
if allsubplots:
for ax in self.get_axes():
if ax.is_last_row():
for label in ax.get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
else:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.set_xlabel('')
if allsubplots:
self.subplots_adjust(bottom=bottom)
def get_children(self):
'get a list of artists contained in the figure'
children = [self.patch]
children.extend(self.artists)
children.extend(self.axes)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.images)
children.extend(self.legends)
return children
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the figure.
Returns True,{}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
# inside = mouseevent.x >= 0 and mouseevent.y >= 0
inside = self.bbox.contains(mouseevent.x, mouseevent.y)
return inside, {}
def get_window_extent(self, *args, **kwargs):
'get the figure bounding box in display space; kwargs are void'
return self.bbox
def suptitle(self, t, **kwargs):
"""
Add a centered title to the figure.
kwargs are :class:`matplotlib.text.Text` properties. Using figure
coordinates, the defaults are:
*x* : 0.5
The x location of the text in figure coords
*y* : 0.98
The y location of the text in figure coords
*horizontalalignment* : 'center'
The horizontal alignment of the text
*verticalalignment* : 'top'
The vertical alignment of the text
A :class:`matplotlib.text.Text` instance is returned.
Example::
fig.suptitle('this is the figure title', fontsize=12)
"""
x = kwargs.pop('x', 0.5)
y = kwargs.pop('y', 0.98)
if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):
kwargs['horizontalalignment'] = 'center'
if ('verticalalignment' not in kwargs) and ('va' not in kwargs):
kwargs['verticalalignment'] = 'top'
sup = self.text(x, y, t, **kwargs)
if self._suptitle is not None:
self._suptitle.set_text(t)
self._suptitle.set_position((x, y))
self._suptitle.update_from(sup)
sup.remove()
else:
self._suptitle = sup
return self._suptitle
def set_canvas(self, canvas):
"""
        Set the canvas that contains the figure
ACCEPTS: a FigureCanvas instance
"""
self.canvas = canvas
def hold(self, b=None):
"""
Set the hold state. If hold is None (default), toggle the
hold state. Else set the hold state to boolean value b.
e.g.::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def figimage(self, X,
xo=0,
yo=0,
alpha=None,
norm=None,
cmap=None,
vmin=None,
vmax=None,
origin=None,
**kwargs):
"""
Adds a non-resampled image to the figure.
call signatures::
figimage(X, **kwargs)
adds a non-resampled array *X* to the figure.
::
figimage(X, xo, yo)
with pixel offsets *xo*, *yo*,
*X* must be a float array:
* If *X* is MxN, assume luminance (grayscale)
* If *X* is MxNx3, assume RGB
* If *X* is MxNx4, assume RGBA
Optional keyword arguments:
========= =========================================================
Keyword Description
========= =========================================================
xo or yo An integer, the *x* and *y* image offset in pixels
cmap a :class:`matplotlib.colors.Colormap` instance, e.g.,
cm.jet. If *None*, default to the rc ``image.cmap``
value
norm a :class:`matplotlib.colors.Normalize` instance. The
default is normalization(). This scales luminance -> 0-1
vmin|vmax are used to scale a luminance image to 0-1. If either
is *None*, the min and max of the luminance values will
be used. Note if you pass a norm instance, the settings
for *vmin* and *vmax* will be ignored.
alpha the alpha blending value, default is *None*
origin [ 'upper' | 'lower' ] Indicates where the [0,0] index of
the array is in the upper left or lower left corner of
the axes. Defaults to the rc image.origin value
========= =========================================================
figimage complements the axes image
(:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled
to fit the current axes. If you want a resampled image to
fill the entire figure, you can define an
:class:`~matplotlib.axes.Axes` with size [0,1,0,1].
An :class:`matplotlib.image.FigureImage` instance is returned.
.. plot:: mpl_examples/pylab_examples/figimage_demo.py
Additional kwargs are Artist kwargs passed on to
:class:`~matplotlib.image.FigureImage`
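        Example (a minimal sketch)::
            Z = np.random.rand(300, 300)
            fig.figimage(Z, xo=50, yo=50)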
"""
if not self._hold:
self.clf()
im = FigureImage(self, cmap, norm, xo, yo, origin, **kwargs)
im.set_array(X)
im.set_alpha(alpha)
if norm is None:
im.set_clim(vmin, vmax)
self.images.append(im)
im._remove_method = lambda h: self.images.remove(h)
return im
def set_size_inches(self, *args, **kwargs):
"""
set_size_inches(w,h, forward=False)
Set the figure size in inches (1in == 2.54cm)
Usage::
fig.set_size_inches(w,h) # OR
fig.set_size_inches((w,h) )
optional kwarg *forward=True* will cause the canvas size to be
automatically updated; e.g., you can resize the figure window
from the shell
ACCEPTS: a w,h tuple with w,h in inches
See Also
--------
matplotlib.Figure.get_size_inches
"""
forward = kwargs.get('forward', False)
if len(args) == 1:
w, h = args[0]
else:
w, h = args
dpival = self.dpi
self.bbox_inches.p1 = w, h
if forward:
dpival = self.dpi
canvasw = w * dpival
canvash = h * dpival
manager = getattr(self.canvas, 'manager', None)
if manager is not None:
manager.resize(int(canvasw), int(canvash))
def get_size_inches(self):
"""
Returns the current size of the figure in inches (1in == 2.54cm)
        as a numpy array.
Returns
-------
size : ndarray
The size of the figure in inches
See Also
--------
matplotlib.Figure.set_size_inches
"""
return np.array(self.bbox_inches.p1)
def get_edgecolor(self):
'Get the edge color of the Figure rectangle'
return self.patch.get_edgecolor()
def get_facecolor(self):
'Get the face color of the Figure rectangle'
return self.patch.get_facecolor()
def get_figwidth(self):
'Return the figwidth as a float'
return self.bbox_inches.width
def get_figheight(self):
'Return the figheight as a float'
return self.bbox_inches.height
def get_dpi(self):
'Return the dpi as a float'
return self.dpi
def get_frameon(self):
'get the boolean indicating frameon'
return self.frameon
def set_edgecolor(self, color):
"""
Set the edge color of the Figure rectangle
ACCEPTS: any matplotlib color - see help(colors)
"""
self.patch.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the face color of the Figure rectangle
ACCEPTS: any matplotlib color - see help(colors)
"""
self.patch.set_facecolor(color)
def set_dpi(self, val):
"""
Set the dots-per-inch of the figure
ACCEPTS: float
"""
self.dpi = val
def set_figwidth(self, val):
"""
Set the width of the figure in inches
ACCEPTS: float
"""
self.bbox_inches.x1 = val
def set_figheight(self, val):
"""
Set the height of the figure in inches
ACCEPTS: float
"""
self.bbox_inches.y1 = val
def set_frameon(self, b):
"""
Set whether the figure frame (background) is displayed or invisible
ACCEPTS: boolean
"""
self.frameon = b
def delaxes(self, a):
'remove a from the figure and update the current axes'
self._axstack.remove(a)
for func in self._axobservers:
func(self)
def _make_key(self, *args, **kwargs):
'make a hashable key out of args and kwargs'
def fixitems(items):
#items may have arrays and lists in them, so convert them
# to tuples for the key
ret = []
for k, v in items:
# some objects can define __getitem__ without being
# iterable and in those cases the conversion to tuples
# will fail. So instead of using the iterable(v) function
# we simply try and convert to a tuple, and proceed if not.
try:
v = tuple(v)
except Exception:
pass
ret.append((k, v))
return tuple(ret)
def fixlist(args):
ret = []
for a in args:
if iterable(a):
a = tuple(a)
ret.append(a)
return tuple(ret)
key = fixlist(args), fixitems(six.iteritems(kwargs))
return key
@docstring.dedent_interpd
def add_axes(self, *args, **kwargs):
"""
Add an axes at position *rect* [*left*, *bottom*, *width*,
*height*] where all quantities are in fractions of figure
width and height. kwargs are legal
:class:`~matplotlib.axes.Axes` kwargs plus *projection* which
sets the projection type of the axes. (For backward
compatibility, ``polar=True`` may also be provided, which is
equivalent to ``projection='polar'``). Valid values for
*projection* are: %(projection_names)s. Some of these
projections support additional kwargs, which may be provided
to :meth:`add_axes`. Typical usage::
rect = l,b,w,h
fig.add_axes(rect)
fig.add_axes(rect, frameon=False, axisbg='g')
fig.add_axes(rect, polar=True)
fig.add_axes(rect, projection='polar')
fig.add_axes(ax)
If the figure already has an axes with the same parameters,
then it will simply make that axes current and return it. If
you do not want this behavior, e.g., you want to force the
creation of a new Axes, you must use a unique set of args and
kwargs. The axes :attr:`~matplotlib.axes.Axes.label`
attribute has been exposed for this purpose. e.g., if you want
two axes that are otherwise identical to be added to the
figure, make sure you give them unique labels::
fig.add_axes(rect, label='axes1')
fig.add_axes(rect, label='axes2')
In rare circumstances, add_axes may be called with a single
argument, an Axes instance already created in the present
figure but not in the figure's list of axes. For example,
if an axes has been removed with :meth:`delaxes`, it can
be restored with::
fig.add_axes(ax)
In all cases, the :class:`~matplotlib.axes.Axes` instance
will be returned.
In addition to *projection*, the following kwargs are supported:
%(Axes)s
"""
if not len(args):
return
# shortcut the projection "key" modifications later on, if an axes
# with the exact args/kwargs exists, return it immediately.
key = self._make_key(*args, **kwargs)
ax = self._axstack.get(key)
if ax is not None:
self.sca(ax)
return ax
if isinstance(args[0], Axes):
a = args[0]
assert(a.get_figure() is self)
else:
rect = args[0]
projection_class, kwargs, key = process_projection_requirements(
self, *args, **kwargs)
# check that an axes of this type doesn't already exist, if it
# does, set it as active and return it
ax = self._axstack.get(key)
if ax is not None and isinstance(ax, projection_class):
self.sca(ax)
return ax
# create the new axes using the axes class given
a = projection_class(self, rect, **kwargs)
self._axstack.add(key, a)
self.sca(a)
return a
@docstring.dedent_interpd
def add_subplot(self, *args, **kwargs):
"""
Add a subplot. Examples::
fig.add_subplot(111)
# equivalent but more general
fig.add_subplot(1,1,1)
# add subplot with red background
fig.add_subplot(212, axisbg='r')
# add a polar subplot
fig.add_subplot(111, projection='polar')
# add Subplot instance sub
fig.add_subplot(sub)
*kwargs* are legal :class:`~matplotlib.axes.Axes` kwargs plus
*projection*, which chooses a projection type for the axes.
(For backward compatibility, *polar=True* may also be
provided, which is equivalent to *projection='polar'*). Valid
values for *projection* are: %(projection_names)s. Some of
these projections
support additional *kwargs*, which may be provided to
:meth:`add_axes`.
The :class:`~matplotlib.axes.Axes` instance will be returned.
If the figure already has a subplot with key (*args*,
*kwargs*) then it will simply make that subplot current and
return it.
.. seealso:: :meth:`~matplotlib.pyplot.subplot` for an
explanation of the args.
The following kwargs are supported:
%(Axes)s
"""
if not len(args):
return
if len(args) == 1 and isinstance(args[0], int):
args = tuple([int(c) for c in str(args[0])])
if len(args) != 3:
raise ValueError("Integer subplot specification must " +
"be a three digit number. " +
"Not {n:d}".format(n=len(args)))
if isinstance(args[0], SubplotBase):
a = args[0]
assert(a.get_figure() is self)
# make a key for the subplot (which includes the axes object id
# in the hash)
key = self._make_key(*args, **kwargs)
else:
projection_class, kwargs, key = process_projection_requirements(
self, *args, **kwargs)
# try to find the axes with this key in the stack
ax = self._axstack.get(key)
if ax is not None:
if isinstance(ax, projection_class):
# the axes already existed, so set it as active & return
self.sca(ax)
return ax
else:
# Undocumented convenience behavior:
# subplot(111); subplot(111, projection='polar')
# will replace the first with the second.
# Without this, add_subplot would be simpler and
# more similar to add_axes.
self._axstack.remove(ax)
a = subplot_class_factory(projection_class)(self, *args, **kwargs)
self._axstack.add(key, a)
self.sca(a)
return a
def clf(self, keep_observers=False):
"""
Clear the figure.
Set *keep_observers* to True if, for example,
a gui widget is tracking the axes in the figure.
"""
self.suppressComposite = None
self.callbacks = cbook.CallbackRegistry()
for ax in tuple(self.axes): # Iterate over the copy.
ax.cla()
self.delaxes(ax) # removes ax from self._axstack
toolbar = getattr(self.canvas, 'toolbar', None)
if toolbar is not None:
toolbar.update()
self._axstack.clear()
self.artists = []
self.lines = []
self.patches = []
self.texts = []
self.images = []
self.legends = []
if not keep_observers:
self._axobservers = []
self._suptitle = None
def clear(self):
"""
Clear the figure -- synonym for :meth:`clf`.
"""
self.clf()
@allow_rasterization
def draw(self, renderer):
"""
Render the figure using :class:`matplotlib.backend_bases.RendererBase`
instance *renderer*.
"""
# draw the figure bounding box, perhaps none for white figure
if not self.get_visible():
return
renderer.open_group('figure')
if self.get_tight_layout() and self.axes:
try:
self.tight_layout(renderer, **self._tight_parameters)
except ValueError:
pass
# ValueError can occur when resizing a window.
if self.frameon:
self.patch.draw(renderer)
# a list of (zorder, func_to_call, list_of_args)
dsu = []
for a in self.patches:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.lines:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.artists:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
# override the renderer default if self.suppressComposite
# is not None
not_composite = renderer.option_image_nocomposite()
if self.suppressComposite is not None:
not_composite = self.suppressComposite
if (len(self.images) <= 1 or not_composite or
not cbook.allequal([im.origin for im in self.images])):
for a in self.images:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
else:
# make a composite image blending alpha
# list of (_image.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag), im.ox, im.oy, im.get_alpha())
for im in self.images]
im = _image.from_images(self.bbox.height * mag,
self.bbox.width * mag,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
def draw_composite():
gc = renderer.new_gc()
gc.set_clip_rectangle(self.bbox)
gc.set_clip_path(self.get_clip_path())
renderer.draw_image(gc, l, b, im)
gc.restore()
dsu.append((self.images[0].get_zorder(), self.images[0],
draw_composite, []))
# render the axes
for a in self.axes:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
# render the figure text
for a in self.texts:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.legends:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
dsu = [row for row in dsu if not row[1].get_animated()]
dsu.sort(key=itemgetter(0))
for zorder, a, func, args in dsu:
func(*args)
renderer.close_group('figure')
self._cachedRenderer = renderer
self.canvas.draw_event(renderer)
def draw_artist(self, a):
"""
draw :class:`matplotlib.artist.Artist` instance *a* only --
this is available only after the figure is drawn
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def get_axes(self):
return self.axes
def legend(self, handles, labels, *args, **kwargs):
"""
Place a legend in the figure. Labels are a sequence of
strings, handles is a sequence of
:class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances, and loc can be a
string or an integer specifying the legend location
USAGE::
legend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right')
The *loc* location codes are::
'best' : 0, (currently not supported for figure legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
*loc* can also be an (x,y) tuple in figure coords, which
specifies the lower left of the legend box. figure coords are
(0,0) is the left, bottom of the figure and 1,1 is the right,
top.
Keyword arguments:
*prop*: [ *None* | FontProperties | dict ]
A :class:`matplotlib.font_manager.FontProperties`
instance. If *prop* is a dictionary, a new instance will be
created with *prop*. If *None*, use rc settings.
*numpoints*: integer
The number of points in the legend line, default is 4
        *scatterpoints*: integer
          The number of points in the legend for a scatter plot
*scatteryoffsets*: list of floats
a list of yoffsets for scatter symbols in legend
*markerscale*: [ *None* | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*fancybox*: [ *None* | *False* | *True* ]
if *True*, draw a frame with a round fancybox. If *None*, use rc
*shadow*: [ *None* | *False* | *True* ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*ncol* : integer
number of columns. default is 1
*mode* : [ "expand" | *None* ]
if mode is "expand", the legend will be horizontally expanded
to fill the axes area (or *bbox_to_anchor*)
*title* : string
the legend title
Padding and spacing between various elements use following keywords
parameters. The dimensions of these values are given as a fraction
of the fontsize. Values from rcParams will be used if None.
================ ====================================================
Keyword Description
================ ====================================================
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ====================================================
.. Note:: Not all kinds of artist are supported by the legend.
See LINK (FIXME) for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/figlegend_demo.py
"""
l = Legend(self, handles, labels, *args, **kwargs)
self.legends.append(l)
l._remove_method = lambda h: self.legends.remove(h)
return l
@docstring.dedent_interpd
def text(self, x, y, s, *args, **kwargs):
"""
Add text to figure.
Call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text to figure at location *x*, *y* (relative 0-1
coords). See :func:`~matplotlib.pyplot.text` for the meaning
of the other arguments.
kwargs control the :class:`~matplotlib.text.Text` properties:
%(Text)s
"""
override = _process_text_args({}, *args, **kwargs)
t = Text(x=x, y=y, text=s)
t.update(override)
self._set_artist_props(t)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
return t
def _set_artist_props(self, a):
if a != self:
a.set_figure(self)
a.set_transform(self.transFigure)
@docstring.dedent_interpd
def gca(self, **kwargs):
"""
Get the current axes, creating one if necessary
The following kwargs are supported for ensuring the returned axes
adheres to the given projection etc., and for axes creation if
the active axes does not exist:
%(Axes)s
"""
ckey, cax = self._axstack.current_key_axes()
        # if there exists an axes on the stack, see if it matches
# the desired axes configuration
if cax is not None:
# if no kwargs are given just return the current axes
# this is a convenience for gca() on axes such as polar etc.
if not kwargs:
return cax
# if the user has specified particular projection detail
# then build up a key which can represent this
else:
# we don't want to modify the original kwargs
# so take a copy so that we can do what we like to it
kwargs_copy = kwargs.copy()
projection_class, _, key = process_projection_requirements(
self, **kwargs_copy)
# let the returned axes have any gridspec by removing it from
# the key
ckey = ckey[1:]
key = key[1:]
# if the cax matches this key then return the axes, otherwise
# continue and a new axes will be created
if key == ckey and isinstance(cax, projection_class):
return cax
# no axes found, so create one which spans the figure
return self.add_subplot(1, 1, 1, **kwargs)
def sca(self, a):
'Set the current axes to be a and return a'
self._axstack.bubble(a)
for func in self._axobservers:
func(self)
return a
def _gci(self):
"""
helper for :func:`~matplotlib.pyplot.gci`;
do not use elsewhere.
"""
# Look first for an image in the current Axes:
cax = self._axstack.current_key_axes()[1]
if cax is None:
return None
im = cax._gci()
if im is not None:
return im
# If there is no image in the current Axes, search for
# one in a previously created Axes. Whether this makes
# sense is debatable, but it is the documented behavior.
for ax in reversed(self.axes):
im = ax._gci()
if im is not None:
return im
return None
def __getstate__(self):
state = self.__dict__.copy()
# the axobservers cannot currently be pickled.
# Additionally, the canvas cannot currently be pickled, but this has
# the benefit of meaning that a figure can be detached from one canvas,
# and re-attached to another.
for attr_to_pop in ('_axobservers', 'show',
'canvas', '_cachedRenderer'):
state.pop(attr_to_pop, None)
# add version information to the state
state['__mpl_version__'] = _mpl_version
# check to see if the figure has a manager and whether it is registered
# with pyplot
if getattr(self.canvas, 'manager', None) is not None:
manager = self.canvas.manager
import matplotlib._pylab_helpers
if manager in list(six.itervalues(
matplotlib._pylab_helpers.Gcf.figs)):
state['_restore_to_pylab'] = True
return state
def __setstate__(self, state):
version = state.pop('__mpl_version__')
restore_to_pylab = state.pop('_restore_to_pylab', False)
if version != _mpl_version:
import warnings
warnings.warn("This figure was saved with matplotlib version %s "
"and is unlikely to function correctly." %
(version, ))
self.__dict__ = state
# re-initialise some of the unstored state information
self._axobservers = []
self.canvas = None
if restore_to_pylab:
# lazy import to avoid circularity
import matplotlib.pyplot as plt
import matplotlib._pylab_helpers as pylab_helpers
allnums = plt.get_fignums()
num = max(allnums) + 1 if allnums else 1
mgr = plt._backend_mod.new_figure_manager_given_figure(num, self)
# XXX The following is a copy and paste from pyplot. Consider
# factoring to pylab_helpers
if self.get_label():
mgr.set_window_title(self.get_label())
# make this figure current on button press event
def make_active(event):
pylab_helpers.Gcf.set_active(mgr)
mgr._cidgcf = mgr.canvas.mpl_connect('button_press_event',
make_active)
pylab_helpers.Gcf.set_active(mgr)
self.number = num
plt.draw_if_interactive()
def add_axobserver(self, func):
'whenever the axes state change, ``func(self)`` will be called'
self._axobservers.append(func)
def savefig(self, *args, **kwargs):
"""
Save the current figure.
Call signature::
savefig(fname, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
The output formats available depend on the backend being used.
Arguments:
*fname*:
A string containing a path to a filename, or a Python
file-like object, or possibly some backend-dependent object
such as :class:`~matplotlib.backends.backend_pdf.PdfPages`.
If *format* is *None* and *fname* is a string, the output
format is deduced from the extension of the filename. If
the filename has no extension, the value of the rc parameter
``savefig.format`` is used.
If *fname* is not a string, remember to specify *format* to
ensure that the correct backend is used.
Keyword arguments:
*dpi*: [ *None* | ``scalar > 0`` ]
The resolution in dots per inch. If *None* it will default to
the value ``savefig.dpi`` in the matplotlibrc file.
*facecolor*, *edgecolor*:
the colors of the figure rectangle
*orientation*: [ 'landscape' | 'portrait' ]
not supported on all backends; currently only on postscript output
*papertype*:
One of 'letter', 'legal', 'executive', 'ledger', 'a0' through
'a10', 'b0' through 'b10'. Only supported for postscript
output.
*format*:
One of the file extensions supported by the active
backend. Most backends support png, pdf, ps, eps and svg.
*transparent*:
If *True*, the axes patches will all be transparent; the
figure patch will also be transparent unless facecolor
and/or edgecolor are specified via kwargs.
This is useful, for example, for displaying
a plot on top of a colored background on a web page. The
transparency of these patches will be restored to their
original values upon exit of this function.
*frameon*:
If *True*, the figure patch will be colored, if *False*, the
figure background will be transparent. If not provided, the
rcParam 'savefig.frameon' will be used.
*bbox_inches*:
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure.
*pad_inches*:
Amount of padding around the figure when bbox_inches is
'tight'.
*bbox_extra_artists*:
A list of extra artists that will be considered when the
tight bbox is calculated.
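        Example (a minimal sketch)::
            fig.savefig('figure.png', dpi=200, bbox_inches='tight')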
"""
kwargs.setdefault('dpi', rcParams['savefig.dpi'])
frameon = kwargs.pop('frameon', rcParams['savefig.frameon'])
transparent = kwargs.pop('transparent',
rcParams['savefig.transparent'])
if transparent:
kwargs.setdefault('facecolor', 'none')
kwargs.setdefault('edgecolor', 'none')
original_axes_colors = []
for ax in self.axes:
patch = ax.patch
original_axes_colors.append((patch.get_facecolor(),
patch.get_edgecolor()))
patch.set_facecolor('none')
patch.set_edgecolor('none')
else:
kwargs.setdefault('facecolor', rcParams['savefig.facecolor'])
kwargs.setdefault('edgecolor', rcParams['savefig.edgecolor'])
if frameon:
original_frameon = self.get_frameon()
self.set_frameon(frameon)
self.canvas.print_figure(*args, **kwargs)
if frameon:
self.set_frameon(original_frameon)
if transparent:
for ax, cc in zip(self.axes, original_axes_colors):
ax.patch.set_facecolor(cc[0])
ax.patch.set_edgecolor(cc[1])
@docstring.dedent_interpd
def colorbar(self, mappable, cax=None, ax=None, use_gridspec=True, **kw):
"""
Create a colorbar for a ScalarMappable instance, *mappable*.
Documentation for the pylab thin wrapper:
%(colorbar_doc)s
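        Example (a minimal sketch; ``data`` is any 2D array already plotted
        on ``ax``)::
            im = ax.imshow(data)
            fig.colorbar(im, ax=ax)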
"""
if ax is None:
ax = self.gca()
# Store the value of gca so that we can set it back later on.
current_ax = self.gca()
if cax is None:
if use_gridspec and isinstance(ax, SubplotBase):
cax, kw = cbar.make_axes_gridspec(ax, **kw)
else:
cax, kw = cbar.make_axes(ax, **kw)
cax.hold(True)
cb = cbar.colorbar_factory(cax, mappable, **kw)
self.sca(current_ax)
return cb
def subplots_adjust(self, *args, **kwargs):
"""
Call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Update the :class:`SubplotParams` with *kwargs* (defaulting to rc when
*None*) and update the subplot locations
"""
self.subplotpars.update(*args, **kwargs)
for ax in self.axes:
if not isinstance(ax, SubplotBase):
# Check if sharing a subplots axis
if (ax._sharex is not None and
isinstance(ax._sharex, SubplotBase)):
ax._sharex.update_params()
ax.set_position(ax._sharex.figbox)
elif (ax._sharey is not None and
isinstance(ax._sharey, SubplotBase)):
ax._sharey.update_params()
ax.set_position(ax._sharey.figbox)
else:
ax.update_params()
ax.set_position(ax.figbox)
def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=1,
mouse_pop=3, mouse_stop=2):
"""
Call signature::
ginput(self, n=1, timeout=30, show_clicks=True,
mouse_add=1, mouse_pop=3, mouse_stop=2)
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is zero or negative, does not timeout.
If *n* is zero or negative, accumulate clicks until a middle click
(or potentially both mouse buttons at once) terminates the input.
Right clicking cancels last input.
The buttons used for the various actions (adding points, removing
        points, terminating the inputs) can be overridden via the
arguments *mouse_add*, *mouse_pop* and *mouse_stop*, that give
the associated mouse button: 1 for left, 2 for middle, 3 for
right.
The keyboard can also be used to select points in case your mouse
does not have one or more of the buttons. The delete and backspace
keys act like right clicking (i.e., remove last point), the enter key
terminates input and any other key (not already used by the window
manager) selects a point.
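        Example (a minimal sketch)::
            # wait for three clicks and collect their (x, y) coordinates
            pts = fig.ginput(3)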
"""
blocking_mouse_input = BlockingMouseInput(self,
mouse_add=mouse_add,
mouse_pop=mouse_pop,
mouse_stop=mouse_stop)
return blocking_mouse_input(n=n, timeout=timeout,
show_clicks=show_clicks)
def waitforbuttonpress(self, timeout=-1):
"""
Call signature::
waitforbuttonpress(self, timeout=-1)
Blocking call to interact with the figure.
        This will return True if a key was pressed, False if a mouse
button was pressed and None if *timeout* was reached without
either being pressed.
If *timeout* is negative, does not timeout.
"""
blocking_input = BlockingKeyMouseInput(self)
return blocking_input(timeout=timeout)
def get_default_bbox_extra_artists(self):
bbox_artists = [artist for artist in self.get_children()
if artist.get_visible()]
for ax in self.axes:
if ax.get_visible():
bbox_artists.extend(ax.get_default_bbox_extra_artists())
# we don't want the figure's patch to influence the bbox calculation
bbox_artists.remove(self.patch)
return bbox_artists
def get_tightbbox(self, renderer):
"""
Return a (tight) bounding box of the figure in inches.
        It only accounts for the axes title, axis labels, and axis
ticklabels. Needs improvement.
"""
bb = []
for ax in self.axes:
if ax.get_visible():
bb.append(ax.get_tightbbox(renderer))
if len(bb) == 0:
return self.bbox_inches
_bbox = Bbox.union([b for b in bb if b.width != 0 or b.height != 0])
bbox_inches = TransformedBbox(_bbox,
Affine2D().scale(1. / self.dpi))
return bbox_inches
def tight_layout(self, renderer=None, pad=1.08, h_pad=None,
w_pad=None, rect=None):
"""
Adjust subplot parameters to give specified padding.
Parameters:
*pad* : float
padding between the figure edge and the edges of subplots,
as a fraction of the font-size.
*h_pad*, *w_pad* : float
padding (height/width) between edges of adjacent subplots.
            Defaults to *pad*.
*rect* : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
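        Example (a minimal sketch)::
            fig.tight_layout(pad=0.5, rect=(0, 0, 1, 0.95))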
"""
from .tight_layout import (get_renderer, get_tight_layout_figure,
get_subplotspec_list)
subplotspec_list = get_subplotspec_list(self.axes)
if None in subplotspec_list:
warnings.warn("This figure includes Axes that are not "
"compatible with tight_layout, so its "
"results might be incorrect.")
if renderer is None:
renderer = get_renderer(self)
kwargs = get_tight_layout_figure(self, self.axes, subplotspec_list,
renderer,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=rect)
self.subplots_adjust(**kwargs)
def figaspect(arg):
"""
Create a figure with specified aspect ratio. If *arg* is a number,
use that aspect ratio. If *arg* is an array, figaspect will
determine the width and height for a figure that would fit array
preserving aspect ratio. The figure width, height in inches are
    returned. Be sure to create an axes with equal width and height,
e.g.,
Example usage::
# make a figure twice as tall as it is wide
w, h = figaspect(2.)
fig = Figure(figsize=(w,h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.imshow(A, **kwargs)
# make a figure with the proper aspect for an array
A = rand(5,3)
w, h = figaspect(A)
fig = Figure(figsize=(w,h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.imshow(A, **kwargs)
Thanks to Fernando Perez for this function
"""
isarray = hasattr(arg, 'shape')
# min/max sizes to respect when autoscaling. If John likes the idea, they
# could become rc parameters, for now they're hardwired.
figsize_min = np.array((4.0, 2.0)) # min length for width/height
figsize_max = np.array((16.0, 16.0)) # max length for width/height
#figsize_min = rcParams['figure.figsize_min']
#figsize_max = rcParams['figure.figsize_max']
# Extract the aspect ratio of the array
if isarray:
nr, nc = arg.shape[:2]
arr_ratio = float(nr) / nc
else:
arr_ratio = float(arg)
# Height of user figure defaults
fig_height = rcParams['figure.figsize'][1]
# New size for the figure, keeping the aspect ratio of the caller
newsize = np.array((fig_height / arr_ratio, fig_height))
# Sanity checks, don't drop either dimension below figsize_min
newsize /= min(1.0, *(newsize / figsize_min))
# Avoid humongous windows as well
newsize /= max(1.0, *(newsize / figsize_max))
# Finally, if we have a really funky aspect ratio, break it but respect
# the min/max dimensions (we don't want figures 10 feet tall!)
newsize = np.clip(newsize, figsize_min, figsize_max)
return newsize
docstring.interpd.update(Figure=martist.kwdoc(Figure))
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/pylab_examples/errorbar_demo.py | 3 | 1237 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
# example data
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
# example variable error bar values
yerr = 0.1 + 0.2*np.sqrt(x)
xerr = 0.1 + yerr
# First illustrate basic pyplot interface, using defaults where possible.
plt.figure()
plt.errorbar(x, y, xerr=0.2, yerr=0.4)
plt.title("Simplest errorbars, 0.2 in x, 0.4 in y")
# Now switch to a more OO interface to exercise more features.
fig, axs = plt.subplots(nrows=2, ncols=2, sharex=True)
ax = axs[0,0]
ax.errorbar(x, y, yerr=yerr, fmt='o')
ax.set_title('Vert. symmetric')
# With 4 subplots, reduce the number of axis ticks to avoid crowding.
ax.locator_params(nbins=4)
ax = axs[0,1]
ax.errorbar(x, y, xerr=xerr, fmt='o')
ax.set_title('Hor. symmetric')
ax = axs[1,0]
ax.errorbar(x, y, yerr=[yerr, 2*yerr], xerr=[xerr, 2*xerr], fmt='--o')
ax.set_title('H, V asymmetric')
ax = axs[1,1]
ax.set_yscale('log')
# Here we have to be careful to keep all y values positive:
ylower = np.maximum(1e-2, y - yerr)
yerr_lower = y - ylower
ax.errorbar(x, y, yerr=[yerr_lower, 2*yerr], xerr=xerr,
fmt='o', ecolor='g', capthick=2)
ax.set_title('Mixed sym., log y')
fig.suptitle('Variable errorbars')
plt.show()
| mit |
aewhatley/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
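    Examples
    --------
    A minimal usage sketch (the digits dataset is used purely for
    illustration):
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.manifold import Isomap
    >>> digits = load_digits()
    >>> embedding = Isomap(n_neighbors=5, n_components=2)
    >>> X_2d = embedding.fit_transform(digits.data[:100])
    >>> X_2d.shape
    (100, 2)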
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
FrancoisRheaultUS/dipy | doc/examples/linear_fascicle_evaluation.py | 3 | 11967 | """
=================================
Linear fascicle evaluation (LiFE)
=================================
Evaluating the results of tractography algorithms is one of the biggest
challenges for diffusion MRI. One proposal for evaluation of tractography
results is to use a forward model that predicts the signal from each of a set
of streamlines, and then fit a linear model to these simultaneous predictions
[Pestilli2014]_.
We will use streamlines generated using probabilistic tracking on CSA
peaks. For brevity, we will include in this example only streamlines going
through the corpus callosum connecting left to right superior frontal
cortex. The process of tracking and finding these streamlines is fully
demonstrated in the :ref:`streamline_tools` example. If this example has been
run, we can read the streamlines from file. Otherwise, we'll run that example
first, by importing it. This provides us with all of the variables that were
created in that example:
"""
from mpl_toolkits.axes_grid1 import AxesGrid
import matplotlib
import matplotlib.pyplot as plt
import dipy.tracking.life as life
from dipy.viz import window, actor, colormap as cmap
import numpy as np
import os.path as op
from dipy.io.streamline import load_trk
import dipy.core.optimize as opt
if not op.exists('lr-superiorfrontal.trk'):
from streamline_tools import *
else:
# We'll need to know where the corpus callosum is from these variables:
from dipy.core.gradients import gradient_table
from dipy.data import get_fnames
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti_data, load_nifti
hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames('stanford_hardi')
label_fname = get_fnames('stanford_labels')
t1_fname = get_fnames('stanford_t1')
data, affine, hardi_img = load_nifti(hardi_fname, return_img=True)
labels = load_nifti_data(label_fname)
t1_data = load_nifti_data(t1_fname)
bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname)
gtab = gradient_table(bvals, bvecs)
cc_slice = labels == 2
# Read the candidates from file in voxel space:
candidate_sl_sft = load_trk('lr-superiorfrontal.trk', 'same')
candidate_sl_sft.to_vox()
candidate_sl = candidate_sl_sft.streamlines
"""
The streamlines that are entered into the model are termed 'candidate
streamlines' (or a 'candidate connectome'):
"""
"""
Let's visualize the initial candidate group of streamlines in 3D, relative to
the anatomical structure of this brain:
"""
# Enables/disables interactive visualization
interactive = False
candidate_streamlines_actor = actor.streamtube(candidate_sl,
cmap.line_colors(candidate_sl))
cc_ROI_actor = actor.contour_from_roi(cc_slice, color=(1., 1., 0.),
opacity=0.5)
vol_actor = actor.slicer(t1_data)
vol_actor.display(x=40)
vol_actor2 = vol_actor.copy()
vol_actor2.display(z=35)
# Add display objects to canvas
ren = window.Renderer()
ren.add(candidate_streamlines_actor)
ren.add(cc_ROI_actor)
ren.add(vol_actor)
ren.add(vol_actor2)
window.record(ren, n_frames=1,
out_path='life_candidates.png',
size=(800, 800))
if interactive:
window.show(ren)
"""
.. figure:: life_candidates.png
:align: center
**Candidate connectome before life optimization**
"""
"""
Next, we initialize a LiFE model. We import the ``dipy.tracking.life`` module,
which contains the classes and functions that implement the model:
"""
fiber_model = life.FiberModel(gtab)
"""
Since we read the streamlines from a file, already in the voxel space, we do
not need to transform them into this space. Otherwise, if the streamline
coordinates were in the world space (relative to the scanner iso-center, or
relative to the mid-point of the AC-PC-connecting line), we would use this::
inv_affine = np.linalg.inv(hardi_img.affine)
the inverse transformation from world space to the voxel space as the affine
for the following model fit.
The next step is to fit the model, producing a ``FiberFit`` class instance,
that stores the data, as well as the results of the fitting procedure.
The LiFE model posits that the signal in the diffusion MRI volume can be
explained by the streamlines, by the equation
.. math::
y = X\beta
Where $y$ is the diffusion MRI signal, $\beta$ are a set of weights on the
streamlines and $X$ is a design matrix. This matrix has the dimensions $m$ by
$n$, where $m=n_{voxels} \cdot n_{directions}$, and $n_{voxels}$ is the number of
voxels in the ROI that contains the streamlines considered in this model. The
$i^{th}$ column of the matrix contains the expected contributions of the
$i^{th}$ streamline (arbitrarily ordered) to each of the voxels. $X$ is a
sparse matrix, because each streamline traverses only a small percentage of the
voxels. The expected contributions of the streamline are calculated using a
forward model, where each node of the streamline is modeled as a cylindrical
fiber compartment with Gaussian diffusion, using the diffusion tensor model.
See [Pestilli2014]_ for more detail on the model, and variations of this model.
"""
fiber_fit = fiber_model.fit(data, candidate_sl, affine=np.eye(4))
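"""
To make the $y = X\beta$ notation above concrete, here is a tiny,
self-contained toy illustration (a hedged sketch with arbitrary numbers,
separate from the fit that was just performed): a few "streamline" columns
explain a small "signal" vector via non-negative least squares, analogous to
the non-negative weights stored in ``fiber_fit.beta``.
"""
from scipy.optimize import nnls
toy_X = np.abs(np.random.RandomState(0).randn(6, 3))  # toy design matrix: m=6, n=3
toy_beta_true = np.array([0.5, 0.0, 1.2])             # one "redundant" column has weight 0
toy_y = toy_X.dot(toy_beta_true)                      # toy signal
toy_beta, _ = nnls(toy_X, toy_y)                      # recovered non-negative weights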
"""
The ``FiberFit`` class instance holds various properties of the model fit. For
example, it has the weights $\beta$, that are assigned to each streamline. In
most cases, a tractography through some region will include redundant
streamlines, and these streamlines will have $\beta_i$ that are 0.
"""
fig, ax = plt.subplots(1)
ax.hist(fiber_fit.beta, bins=100, histtype='step')
ax.set_xlabel('Fiber weights')
ax.set_ylabel('# fibers')
fig.savefig('beta_histogram.png')
"""
.. figure:: beta_histogram.png
:align: center
**LiFE streamline weights**
"""
"""
We use $\beta$ to filter out these redundant streamlines, and generate an
optimized group of streamlines:
"""
optimized_sl = list(np.array(candidate_sl)[np.where(fiber_fit.beta > 0)[0]])
ren = window.Renderer()
ren.add(actor.streamtube(optimized_sl, cmap.line_colors(optimized_sl)))
ren.add(cc_ROI_actor)
ren.add(vol_actor)
window.record(ren, n_frames=1, out_path='life_optimized.png',
size=(800, 800))
if interactive:
window.show(ren)
"""
.. figure:: life_optimized.png
:align: center
**Streamlines selected via LiFE optimization**
"""
"""
The new set of streamlines should do well in fitting the data, and redundant
streamlines have presumably been removed (in this case, about 50% of the
streamlines).
But how well does the model do in explaining the diffusion data? We can
quantify that: the ``FiberFit`` class instance has a `predict` method, which
can be used to invert the model and predict back either the data that was used
to fit the model, or other unseen data (e.g. in cross-validation, see
:ref:`kfold_xval`).
Without arguments, the ``.predict()`` method will predict the diffusion signal
for the same gradient table that was used in the fit data, but ``gtab`` and
``S0`` keyword arguments can be used to predict for other acquisition schemes
and other baseline non-diffusion-weighted signals.
"""
model_predict = fiber_fit.predict()
"""
We will focus on the error in prediction of the diffusion-weighted data, and
calculate the root of the mean squared error.
"""
model_error = model_predict - fiber_fit.data
model_rmse = np.sqrt(np.mean(model_error[:, 10:] ** 2, -1))
"""
As a baseline against which we can compare, we calculate another error term. In
this case, we assume that the weight for each streamline is equal
to zero. This produces the naive prediction of the mean of the signal in each
voxel.
"""
beta_baseline = np.zeros(fiber_fit.beta.shape[0])
pred_weighted = np.reshape(opt.spdot(fiber_fit.life_matrix, beta_baseline),
(fiber_fit.vox_coords.shape[0],
np.sum(~gtab.b0s_mask)))
mean_pred = np.empty((fiber_fit.vox_coords.shape[0], gtab.bvals.shape[0]))
S0 = fiber_fit.b0_signal
"""
Since the fitting is done in the demeaned S/S0 domain, we need
to add back the mean and then multiply by S0 in every voxel:
"""
mean_pred[..., gtab.b0s_mask] = S0[:, None]
mean_pred[..., ~gtab.b0s_mask] =\
(pred_weighted + fiber_fit.mean_signal[:, None]) * S0[:, None]
mean_error = mean_pred - fiber_fit.data
mean_rmse = np.sqrt(np.mean(mean_error ** 2, -1))
"""
First, we can compare the overall distribution of errors between these two
alternative models of the ROI. We show the distribution of differences in error
(improvement through model fitting, relative to the baseline model). Here,
positive values denote an improvement in error with model fit, relative to
without the model fit.
"""
fig, ax = plt.subplots(1)
ax.hist(mean_rmse - model_rmse, bins=100, histtype='step')
ax.text(0.2, 0.9, 'Median RMSE, mean model: %.2f' % np.median(mean_rmse),
horizontalalignment='left',
verticalalignment='center',
transform=ax.transAxes)
ax.text(0.2, 0.8, 'Median RMSE, LiFE: %.2f' % np.median(model_rmse),
horizontalalignment='left',
verticalalignment='center',
transform=ax.transAxes)
ax.set_xlabel('RMS Error')
ax.set_ylabel('# voxels')
fig.savefig('error_histograms.png')
"""
.. figure:: error_histograms.png
:align: center
Improvement in error with fitting of the LiFE model.
"""
"""
Second, we can show the spatial distribution of the two error terms,
and of the improvement with the model fit:
"""
vol_model = np.ones(data.shape[:3]) * np.nan
vol_model[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = model_rmse
vol_mean = np.ones(data.shape[:3]) * np.nan
vol_mean[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = mean_rmse
vol_improve = np.ones(data.shape[:3]) * np.nan
vol_improve[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = mean_rmse - model_rmse
sl_idx = 49
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95)
ax = AxesGrid(fig, 111,
nrows_ncols=(1, 3),
label_mode="1",
share_all=True,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad="5%")
ax[0].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[0].matshow(np.rot90(vol_model[sl_idx, :, :]), cmap=matplotlib.cm.hot)
ax.cbar_axes[0].colorbar(im)
ax[1].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[1].matshow(np.rot90(vol_mean[sl_idx, :, :]), cmap=matplotlib.cm.hot)
ax.cbar_axes[1].colorbar(im)
ax[2].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[2].matshow(np.rot90(vol_improve[sl_idx, :, :]),
cmap=matplotlib.cm.RdBu)
ax.cbar_axes[2].colorbar(im)
for lax in ax:
lax.set_xticks([])
lax.set_yticks([])
fig.savefig("spatial_errors.png")
"""
.. figure:: spatial_errors.png
:align: center
Spatial distribution of error and improvement.
"""
"""
This image demonstrates that in many places, fitting the LiFE model results in
substantial reduction of the error.
Note that for full-brain tractographies *LiFE* can require large amounts of
memory. For detailed memory profiling of the algorithm, based on the
streamlines generated in :ref:`example_probabilistic_fiber_tracking`, see `this
IPython notebook
<http://nbviewer.ipython.org/gist/arokem/bc29f34ebc97510d9def>`_.
For the Matlab implementation of LiFE, head over to `Franco Pestilli's github
webpage <http://francopestilli.github.io/life/>`_.
References
----------
.. [Pestilli2014] Pestilli, F., Yeatman, J, Rokem, A. Kay, K. and Wandell B.A.
(2014). Validation and statistical inference in living connectomes. Nature
Methods 11: 1058-1063. doi:10.1038/nmeth.3098
.. include:: ../links_names.inc
"""
| bsd-3-clause |
brookehus/msmbuilder | msmbuilder/example_datasets/muller.py | 9 | 4306 | from __future__ import print_function, division, absolute_import
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
import numpy as np
from .base import _NWell
from ._muller import propagate, muller_potential
__all__ = ['load_muller', 'MullerPotential']
###############################################################################
# Constants
###############################################################################
# DO NOT CHANGE THESE CONSTANTS WITHOUT UPDATING VERSION ATTRIBUTE
# AND THE DOCSTRING
MULLER_PARAMETERS = dict(
MIN_X=-1.5, MIN_Y=-0.2,
MAX_X=1.2, MAX_Y=2,
N_TRAJECTORIES=10,
N_STEPS=1000000,
THIN=100,
KT=1.5e4,
DT=0.1,
DIFFUSION_CONST=1e-2,
VERSION=1)
###############################################################################
# Code
###############################################################################
class MullerPotential(_NWell):
"""Browian dynamics on Muller potential dataset
Parameters
----------
data_home : optional, default: None
Specify another cache folder for the datasets. By default
all MSMBuilder data is stored in '~/msmbuilder_data' subfolders.
random_state : {int, None}, default: None
        Seed the pseudorandom number generator to generate trajectories. If
seed is None, the global numpy PRNG is used. If random_state is an
int, the simulations will be cached in ``data_home``, or loaded from
``data_home`` if simulations with that seed have been performed already.
With random_state=None, new simulations will be performed and the
trajectories will not be cached.
Notes
-----
This dataset consists of 10 trajectories simulated with Brownian dynamics
on the Muller potential, a two-dimensional, three-well potential energy
surface. The potential is defined in [1]. The dynamics are governed by the
stochastic differential equation::
dx_t/dt = -\nabla V(x)/(kT) + \sqrt{2D} * R(t)
where R(t) is a standard normal white-noise process, and D=1e-2. The
    dynamics are discretized with an Euler integrator with timestep dt=0.1,
    and kT=1.5e4. Each trajectory is simulated for 1000000 steps, and
    coordinates are saved every 100 steps. The starting points for the
trajectories are sampled from the uniform distribution over the rectangular
box between x=(-1.5, 1.2) and y=(-0.2, 2.0).
References
----------
.. [1] Muller, Klaus, and Leo D. Brown. "Location of saddle points and minimum
       energy paths by a constrained simplex optimization procedure." Theoretica
chimica acta 53.1 (1979): 75-93.
"""
target_name = "muller"
version = MULLER_PARAMETERS['VERSION']
n_trajectories = MULLER_PARAMETERS['N_TRAJECTORIES']
def simulate_func(self, random):
M = MULLER_PARAMETERS
x0s = random.uniform(
low=[M['MIN_X'], M['MIN_Y']],
high=[M['MAX_X'], M['MAX_Y']],
size=(M['N_TRAJECTORIES'], 2))
# propagate releases the GIL, so we can use a thread pool and
# get a nice speedup
tp = ThreadPool(cpu_count())
return tp.map(lambda x0:
propagate(
n_steps=M['N_STEPS'], x0=x0, thin=M['THIN'], kT=M['KT'],
dt=M['DT'], D=M['DIFFUSION_CONST'], random_state=random,
min_x=M['MIN_X'], max_x=M['MAX_X'], min_y=M['MIN_Y'],
max_y=M['MAX_Y']), x0s)
def potential(self, x, y):
return muller_potential(x, y)
def plot(self, minx=-1.5, maxx=1.2, miny=-0.2, maxy=2, **kwargs):
"""Helper function to plot the Muller potential
"""
import matplotlib.pyplot as pp
grid_width = max(maxx-minx, maxy-miny) / 200.0
ax = kwargs.pop('ax', None)
xx, yy = np.mgrid[minx:maxx:grid_width, miny:maxy:grid_width]
V = self.potential(xx, yy)
# clip off any values greater than 200, since they mess up
# the color scheme
if ax is None:
ax = pp
ax.contourf(xx, yy, V.clip(max=200), 40, **kwargs)
def load_muller(data_home=None, random_state=None):
return MullerPotential(data_home, random_state).get()
load_muller.__doc__ = MullerPotential.__doc__
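# A minimal, illustrative sketch (not part of this module's API) of a single
# Euler--Maruyama step for the SDE documented in the class docstring above:
#
#     x_{t+1} = x_t - dt * grad_V(x_t) / kT + sqrt(2 * D * dt) * N(0, 1)
#
# ``grad_v`` is a user-supplied gradient of the potential; the actual
# propagation in this module is done by the compiled ``propagate`` function
# imported above.
def _illustrative_euler_step(x, grad_v, kT=1.5e4, dt=0.1, D=1e-2,
                             random_state=None):
    rng = np.random.RandomState(random_state)
    noise = rng.standard_normal(np.shape(x))
    return x - dt * grad_v(x) / kT + np.sqrt(2 * D * dt) * noise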
| lgpl-2.1 |
rothadamg/UPSITE | Tools/get_genes.py | 1 | 6521 | '''
Created on Oct 27, 2014
@author: Adam
'''
import BANNER2
import operator
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
def get_author_entity_dict(docs_dict, Author, current_author_num, tot_authors):
number_docs = len(docs_dict)
author_entity_dict = {}
tfidf_lists = []
for num, key in enumerate(docs_dict):
print Author, "----------------------- Author {0}/{1}....Publication {2}/{3} -----------------------" .format(current_author_num, tot_authors, num + 1, number_docs)
try:
a = docs_dict[key][0]
except IndexError:
a = []
if not isinstance(a, str):
if not isinstance(a, unicode):
pass
else:
# a = docs_dict['23231918'][0]
b = a.encode('ascii','ignore')
c = b.split('.')
sent_list_final= []
for sent in c:
if len(sent) > 1:
sent_list_final.append(sent)
entity_list = BANNER2.main(sent_list_final)
tfidf_lists.append(entity_list)
for x in entity_list:
if x in author_entity_dict:
author_entity_dict[x] += 1
if x not in author_entity_dict:
author_entity_dict[x] = 1
# print 'author_entity_dict'
# print author_entity_dict
# author_entity_dict = {'TRPML1 KD': 4, 'Ca(2+) release': 1, 'EBP50': 1, 'PLCgamma': 1, 'lysosomal ion channel TRPML1': 1, 'transient receptor potential': 1, 'TRPC2': 9, 'transcription factor MTF-1': 1, 'ML1': 10, 'Gb3': 3, 'TRP': 16, 'Toc': 1, 'alpha-galactosidase': 1, 'proapoptotic protein Bax': 1, 'trp2 mutant': 1, 'phospholipase C': 1, 'CatB': 3, 'lysosomal ion homeostasis': 1, 'calmodulin': 1, 'IP(3) receptors': 1, 'LRRK2': 4, 'TRPML3': 1, 'lysosomal SNARE proteins': 1, 'mitochondrial Ca2+': 1, 'GPCR': 3, 'plasma membrane receptors': 1, 'TRPML1': 28, 'zinc transporter ZnT4': 1, 'InaD': 1, 'ROS': 1, 'VAMP7 KD': 1, 'apolipoprotein B hydrolysis in MLIV': 1, 'Ca(2+) release channels': 1, 'caveolin': 1, 'GPI': 1, 'lysosomal enzymes': 1, 'TRPC': 15, 'leucine-rich repeat kinase 2': 1, 'LRRK2 G2019S': 1, 'tyrosine kinase receptors': 1, 'caspase': 1, 'NEHRF': 1, 'TRP family': 3, 'TRP2': 1, 'TRPML1in zinc transport': 1, 'MCOLN1': 1, 'G protein-coupled receptors': 1, 'R1441C': 1, 'scaffolding proteins': 1, 'lysosomal protease cathepsin B': 1, 'TRPML1in': 1, 'RPE1': 1, 'G protein coupled receptors': 1, 'synaptotagmin VII': 1, 'ROS chelator': 1, 'VAMP7': 1, 'transient receptor potential mucolipin 1': 2, 'GFP': 2, 'KD': 2, 'ZnT4': 1, 'reactive oxygen species': 1, 'SYT7': 1, 'Zn(2+) transporters': 1, 'retinal pigmented epithelial 1': 1, 'Fe2': 2, 'TRPML2': 1}
total_entity_count = 0
for entity in author_entity_dict:
entity_count = author_entity_dict[entity]
total_entity_count += entity_count
return author_entity_dict, total_entity_count , tfidf_lists
def normalize_dict(author_entity_dict):
pass
def get_ent_frequency(author_entity_dict, total_entity_count):
frequency_dict = {}
for entity in author_entity_dict:
frequency = float(author_entity_dict[entity]) / float(total_entity_count)
frequency = round(frequency, 6)
frequency_dict[entity] = frequency
sorted_frequency_list = sorted(frequency_dict.items(), key=operator.itemgetter(1), reverse= True)
for x in sorted_frequency_list:
print x
return sorted_frequency_list
def get_tfidf(tfidf_lists):
tfidf_vectorizer = TfidfVectorizer()
tfidf_lists_rm_blank = []
tot_num_entities = 0
for l in tfidf_lists:
tot_num_entities += len(l)
if len(l) > 0:
tfidf_lists_rm_blank.append(l)
print tfidf_lists_rm_blank
tfidf_vect_input = [' '.join(x) for x in tfidf_lists_rm_blank]
print 'tfidf_vect_input', tfidf_vect_input
    tfidf = tfidf_vectorizer.fit_transform(tfidf_vect_input)  # fit on the joined strings, not the raw token lists
print 'tfidf', tfidf
def main(docs_dict, Author, current_author_num, tot_authors):
author_entity_dict, total_entity_count, tfidf_lists = get_author_entity_dict(docs_dict, Author, current_author_num, tot_authors)
# normalized_author_entity_dict = normalize_dict(author_entity_dict)
entity_frequency_list = get_ent_frequency(author_entity_dict, total_entity_count)
# tfidf_lists = [[], ['AC genotype', 'GCLC gene', 'rs6458939', 'glutamate-cysteine ligase', 'glutathione S-transferase alpha 3', 'GSTA3', 'CC genotype', 'glutathione-S-transferase', 'GST', 'GCLC gene'], [], ['MGMT', 'MTHFS', 'CBS', 'MGMT', 'MTHFS', 'CBS', 'DNMT3L genes'], [], ['Bmp6', 'Bone morphogenetic protein 6', 'Bmp6', 'Bmp6', 'Bmp6', 'Bmp6 allele']]
# tfidf_list = get_tfidf(tfidf_lists)
return entity_frequency_list, tfidf_lists
if __name__=="__main__":
docs_dict = {}
Author = ''
current_author_num = ''
tot_authors = ''
main(docs_dict, Author, current_author_num, tot_authors)
| mit |
lukas/ml-class | examples/scikit/cross-validation-tfidf.py | 2 | 1066 | import pandas as pd
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from wandblog import log
import wandb
run = wandb.init()
config = run.config
df = pd.read_csv('tweets.csv')
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
text = df['tweet_text']
fixed_text = text[pd.notnull(text)]
fixed_target = target[pd.notnull(text)]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
clf = SGDClassifier()
from sklearn.model_selection import cross_val_score, cross_val_predict
text_clf = Pipeline([('vect', CountVectorizer(ngram_range=(1,2))),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier(max_iter=1000)),
])
scores = cross_val_score(text_clf, fixed_text, fixed_target)
print(scores)
print(scores.mean())
predictions = cross_val_predict(text_clf, fixed_text, fixed_target)
log(run, fixed_text, fixed_target, predictions)
| gpl-2.0 |
harisbal/pandas | pandas/tests/arrays/categorical/test_constructors.py | 1 | 20895 | # -*- coding: utf-8 -*-
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DatetimeIndex, Index, Interval,
IntervalIndex, NaT, Series, Timestamp, date_range, period_range,
timedelta_range)
import pandas.util.testing as tm
class TestCategoricalConstructors(object):
def test_validate_ordered(self):
# see gh-14058
exp_msg = "'ordered' must either be 'True' or 'False'"
exp_err = TypeError
# This should be a boolean.
ordered = np.array([0, 1, 2])
with tm.assert_raises_regex(exp_err, exp_msg):
Categorical([1, 2, 3], ordered=ordered)
with tm.assert_raises_regex(exp_err, exp_msg):
Categorical.from_codes([0, 0, 1], categories=['a', 'b', 'c'],
ordered=ordered)
def test_constructor_empty(self):
# GH 17248
c = Categorical([])
expected = Index([])
tm.assert_index_equal(c.categories, expected)
c = Categorical([], categories=[1, 2, 3])
expected = pd.Int64Index([1, 2, 3])
tm.assert_index_equal(c.categories, expected)
def test_constructor_empty_boolean(self):
# see gh-22702
cat = pd.Categorical([], categories=[True, False])
categories = sorted(cat.categories.tolist())
assert categories == [False, True]
def test_constructor_tuples(self):
values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
result = Categorical(values)
expected = Index([(1,), (1, 2)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
assert result.ordered is False
def test_constructor_tuples_datetimes(self):
# numpy will auto reshape when all of the tuples are the
# same len, so add an extra one with 2 items and slice it off
values = np.array([(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),),
(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),),
('a', 'b')], dtype=object)[:-1]
result = Categorical(values)
expected = Index([(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical(arr, ordered=False)
assert not factor.ordered
# this however will raise as cannot be sorted
pytest.raises(
TypeError, lambda: Categorical(arr, ordered=True))
def test_constructor_interval(self):
result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)],
ordered=True)
ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])
exp = Categorical(ii, ordered=True)
tm.assert_categorical_equal(result, exp)
tm.assert_index_equal(result.categories, ii)
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
c1 = Categorical(exp_arr)
tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
pytest.raises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
pytest.raises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
assert not c1.ordered
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(["a", "b", "c", "a"]),
categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(c1, c2)
# This should result in integer categories, not float!
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
assert is_integer_dtype(cat.categories)
# https://github.com/pandas-dev/pandas/issues/3678
cat = Categorical([np.nan, 1, 2, 3])
assert is_integer_dtype(cat.categories)
# this should result in floats
cat = Categorical([np.nan, 1, 2., 3])
assert is_float_dtype(cat.categories)
cat = Categorical([np.nan, 1., 2., 3.])
assert is_float_dtype(cat.categories)
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notna()])
# assert is_integer_dtype(vals)
# corner cases
cat = Categorical([1])
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
cat = Categorical(["a"])
assert len(cat.categories) == 1
assert cat.categories[0] == "a"
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# Scalars should be converted to lists
cat = Categorical(1)
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# two arrays
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next one are from the old docs
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_not_sequence(self):
# https://github.com/pandas-dev/pandas/issues/16022
with pytest.raises(TypeError):
Categorical(['a', 'b'], categories='a')
def test_constructor_with_null(self):
# Cannot have NaN in categories
with pytest.raises(ValueError):
Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
with pytest.raises(ValueError):
Categorical([None, "a", "b", "c"],
categories=[None, "a", "b", "c"])
with pytest.raises(ValueError):
Categorical(DatetimeIndex(['nat', '20160101']),
categories=[NaT, Timestamp('20160101')])
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values, Categorical(ci))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values,
Categorical(ci.astype(object),
categories=ci.categories))
def test_constructor_with_generator(self):
# This was raising an Error in isna(single_val).any() because isna
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical(xrange(3))
tm.assert_categorical_equal(cat, exp)
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical([0, 1, 2], categories=xrange(3))
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize("dtl", [
date_range("1995-01-01 00:00:00", periods=5, freq="s"),
date_range("1995-01-01 00:00:00", periods=5,
freq="s", tz="US/Eastern"),
timedelta_range("1 day", periods=5, freq="s")
])
def test_constructor_with_datetimelike(self, dtl):
# see gh-12077
# constructor with a datetimelike and NaT
s = Series(dtl)
c = Categorical(s)
expected = type(dtl)(s)
expected.freq = None
tm.assert_index_equal(c.categories, expected)
tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype="int8"))
# with NaT
s2 = s.copy()
s2.iloc[-1] = NaT
c = Categorical(s2)
expected = type(dtl)(s2.dropna())
expected.freq = None
tm.assert_index_equal(c.categories, expected)
exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
tm.assert_numpy_array_equal(c.codes, exp)
result = repr(c)
assert "NaT" in result
def test_constructor_from_index_series_datetimetz(self):
idx = date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_timedelta(self):
idx = timedelta_range('1 days', freq='D', periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_period(self):
idx = period_range('2015-01-01', freq='D', periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_invariant(self):
# GH 14190
vals = [
np.array([1., 1.2, 1.8, np.nan]),
np.array([1, 2, 3], dtype='int64'),
['a', 'b', 'c', np.nan],
[pd.Period('2014-01'), pd.Period('2014-02'), NaT],
[Timestamp('2014-01-01'), Timestamp('2014-01-02'), NaT],
[Timestamp('2014-01-01', tz='US/Eastern'),
Timestamp('2014-01-02', tz='US/Eastern'), NaT],
]
for val in vals:
c = Categorical(val)
c2 = Categorical(c)
tm.assert_categorical_equal(c, c2)
@pytest.mark.parametrize('ordered', [True, False])
def test_constructor_with_dtype(self, ordered):
categories = ['b', 'a', 'c']
dtype = CategoricalDtype(categories, ordered=ordered)
result = Categorical(['a', 'b', 'a', 'c'], dtype=dtype)
expected = Categorical(['a', 'b', 'a', 'c'], categories=categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
assert result.ordered is ordered
def test_constructor_dtype_and_others_raises(self):
dtype = CategoricalDtype(['a', 'b'], ordered=True)
with tm.assert_raises_regex(ValueError, "Cannot"):
Categorical(['a', 'b'], categories=['a', 'b'], dtype=dtype)
with tm.assert_raises_regex(ValueError, "Cannot"):
Categorical(['a', 'b'], ordered=True, dtype=dtype)
with tm.assert_raises_regex(ValueError, "Cannot"):
Categorical(['a', 'b'], ordered=False, dtype=dtype)
@pytest.mark.parametrize('categories', [
None, ['a', 'b'], ['a', 'c'],
])
@pytest.mark.parametrize('ordered', [True, False])
def test_constructor_str_category(self, categories, ordered):
result = Categorical(['a', 'b'], categories=categories,
ordered=ordered, dtype='category')
expected = Categorical(['a', 'b'], categories=categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_constructor_str_unknown(self):
with tm.assert_raises_regex(ValueError, "Unknown `dtype`"):
Categorical([1, 2], dtype="foo")
def test_constructor_from_categorical_with_dtype(self):
dtype = CategoricalDtype(['a', 'b', 'c'], ordered=True)
values = Categorical(['a', 'b', 'd'])
result = Categorical(values, dtype=dtype)
# We use dtype.categories, not values.categories
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_with_unknown_dtype(self):
dtype = CategoricalDtype(None, ordered=True)
values = Categorical(['a', 'b', 'd'])
result = Categorical(values, dtype=dtype)
# We use values.categories, not dtype.categories
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'd'],
ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_string(self):
values = Categorical(['a', 'b', 'd'])
# use categories, ordered
result = Categorical(values, categories=['a', 'b', 'c'], ordered=True,
dtype='category')
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(result, expected)
# No string
result = Categorical(values, categories=['a', 'b', 'c'], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_with_categorical_categories(self):
# GH17884
expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
result = Categorical(
['a', 'b'], categories=Categorical(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
result = Categorical(
['a', 'b'], categories=CategoricalIndex(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
pytest.raises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
pytest.raises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
pytest.raises(ValueError, f)
# NaN categories included
def f():
Categorical.from_codes([0, 1, 2], ["a", "b", np.nan])
pytest.raises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
pytest.raises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
tm.assert_categorical_equal(exp, res)
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
Categorical.from_codes(codes, categories=["train", "test"])
def test_from_codes_with_categorical_categories(self):
# GH17884
expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
result = Categorical.from_codes(
[0, 1], categories=Categorical(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
result = Categorical.from_codes(
[0, 1], categories=CategoricalIndex(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
# non-unique Categorical still raises
with pytest.raises(ValueError):
Categorical.from_codes([0, 1], Categorical(['a', 'b', 'a']))
def test_from_codes_with_nan_code(self):
# GH21767
codes = [1, 2, np.nan]
categories = ['a', 'b', 'c']
with pytest.raises(ValueError):
Categorical.from_codes(codes, categories)
def test_from_codes_with_float(self):
# GH21767
codes = [1.0, 2.0, 0] # integer, but in float dtype
categories = ['a', 'b', 'c']
with tm.assert_produces_warning(FutureWarning):
cat = Categorical.from_codes(codes, categories)
tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype='i1'))
codes = [1.1, 2.0, 0] # non-integer
with pytest.raises(ValueError):
Categorical.from_codes(codes, categories)
@pytest.mark.parametrize('dtype', [None, 'category'])
def test_from_inferred_categories(self, dtype):
cats = ['a', 'b']
codes = np.array([0, 0, 1, 1], dtype='i8')
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes(codes, cats)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, 'category'])
def test_from_inferred_categories_sorts(self, dtype):
cats = ['b', 'a']
codes = np.array([0, 1, 1, 1], dtype='i8')
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes([1, 0, 0, 0], ['a', 'b'])
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_dtype(self):
cats = ['a', 'b', 'd']
codes = np.array([0, 1, 0, 2], dtype='i8')
dtype = CategoricalDtype(['c', 'b', 'a'], ordered=True)
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical(['a', 'b', 'a', 'd'],
categories=['c', 'b', 'a'],
ordered=True)
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_coerces(self):
cats = ['1', '2', 'bad']
codes = np.array([0, 0, 1, 2], dtype='i8')
dtype = CategoricalDtype([1, 2])
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical([1, 1, 2, np.nan])
tm.assert_categorical_equal(result, expected)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
assert not cat.ordered
cat = Categorical([0, 1, 2], ordered=False)
assert not cat.ordered
cat = Categorical([0, 1, 2], ordered=True)
assert cat.ordered
@pytest.mark.xfail(reason="Imaginary values not supported in Categorical",
strict=True)
def test_constructor_imaginary(self):
values = [1, 2, 3 + 1j]
c1 = Categorical(values)
tm.assert_index_equal(c1.categories, Index(values))
tm.assert_numpy_array_equal(np.array(c1), np.array(values))
| bsd-3-clause |
anoopkunchukuttan/theano-rnn | hf_example.py | 9 | 7374 | """
This code uses the recurrent neural net implementation in rnn.py
but trains it using Hessian-Free optimization.
It requires the theano-hf package:
https://github.com/boulanni/theano-hf
@author Graham Taylor
"""
from rnn import MetaRNN
from hf import SequenceDataset, hf_optimizer
import numpy as np
import matplotlib.pyplot as plt
import logging
def test_real(n_updates=100):
""" Test RNN with real-valued outputs. """
n_hidden = 10
n_in = 5
n_out = 3
n_steps = 10
n_seq = 1000
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps, n_out))
targets[:, 1:, 0] = seq[:, :-1, 3] # delayed 1
targets[:, 1:, 1] = seq[:, :-1, 2] # delayed 1
targets[:, 2:, 2] = seq[:, :-2, 0] # delayed 2
targets += 0.01 * np.random.standard_normal(targets.shape)
# SequenceDataset wants a list of sequences
# this allows them to be different lengths, but here they're not
seq = [i for i in seq]
targets = [i for i in targets]
gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=100)
cg_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=20)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
activation='tanh')
opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
s=model.rnn.y_pred,
costs=[model.rnn.loss(model.y)], h=model.rnn.h)
opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[0])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.plot(targets[0])
guess = model.predict(seq[0])
guessed_targets = plt.plot(guess, linestyle='--')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_title('solid: true output, dashed: model output')
def test_binary(multiple_out=False, n_updates=250):
""" Test RNN with binary outputs. """
n_hidden = 10
n_in = 5
if multiple_out:
n_out = 2
else:
n_out = 1
n_steps = 10
n_seq = 100
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps, n_out), dtype='int32')
# whether lag 1 (dim 3) is greater than lag 2 (dim 0)
targets[:, 2:, 0] = np.cast[np.int32](seq[:, 1:-1, 3] > seq[:, :-2, 0])
if multiple_out:
# whether product of lag 1 (dim 4) and lag 1 (dim 2)
# is less than lag 2 (dim 0)
targets[:, 2:, 1] = np.cast[np.int32](
(seq[:, 1:-1, 4] * seq[:, 1:-1, 2]) > seq[:, :-2, 0])
# SequenceDataset wants a list of sequences
# this allows them to be different lengths, but here they're not
seq = [i for i in seq]
targets = [i for i in targets]
gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=500)
cg_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=100)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
activation='tanh', output_type='binary')
# optimizes negative log likelihood
# but also reports zero-one error
opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
s=model.rnn.y_pred,
costs=[model.rnn.loss(model.y),
model.rnn.errors(model.y)], h=model.rnn.h)
# using settings of initial_lambda and mu given in Nicolas' RNN example
    # seems to do a little worse than the default
opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[seq_num])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
guess = model.predict_proba(seq[seq_num])
guessed_targets = plt.step(xrange(n_steps), guess)
plt.setp(guessed_targets, linestyle='--', marker='d')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_ylim((-0.1, 1.1))
ax2.set_title('solid: true output, dashed: model output (prob)')
def test_softmax(n_updates=250):
""" Test RNN with softmax outputs. """
n_hidden = 10
n_in = 5
n_steps = 10
n_seq = 100
n_classes = 3
n_out = n_classes # restricted to single softmax per time step
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps), dtype='int32')
thresh = 0.5
# if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
# class 1
# if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
# class 2
# if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
# class 0
targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
#targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
# SequenceDataset wants a list of sequences
# this allows them to be different lengths, but here they're not
seq = [i for i in seq]
targets = [i for i in targets]
gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=500)
cg_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=100)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
activation='tanh', output_type='softmax',
use_symbolic_softmax=True)
# optimizes negative log likelihood
# but also reports zero-one error
opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
s=model.rnn.y_pred,
costs=[model.rnn.loss(model.y),
model.rnn.errors(model.y)], h=model.rnn.h)
# using settings of initial_lambda and mu given in Nicolas' RNN example
    # seems to do a little worse than the default
opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[seq_num])
ax1.set_title('input')
ax2 = plt.subplot(212)
# blue line will represent true classes
true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
# show probabilities (in b/w) output by model
guess = model.predict_proba(seq[seq_num])
guessed_probs = plt.imshow(guess.T, interpolation='nearest',
cmap='gray')
ax2.set_title('blue: true class, grayscale: probs assigned by model')
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
#test_real(n_updates=20)
#test_binary(multiple_out=True, n_updates=20)
test_softmax(n_updates=20)
| bsd-3-clause |
rl-institut/reegis_hp | reegis_hp/de21/results.py | 3 | 11607 | import easygui_qt as easy
import pandas as pd
import numpy as np
import geoplot
from matplotlib import pyplot as plt
import math
from matplotlib.colors import LinearSegmentedColormap
MTH = {'sum': np.sum, 'max': np.max, 'min': np.min, 'mean': np.mean}
class SpatialData:
def __init__(self, result_file=None):
if result_file is None:
result_file = easy.get_file_names(title="Select result file.")[0]
print(result_file)
self.results = pd.read_csv(result_file, index_col=[0, 1, 2])
self.polygons = None
self.lines = None
self.plotter = None
def add_polygon_column(self, obj=None, direction=None, bus=None,
method=None, kws=None, **kwargs):
if method is None:
method = easy.get_choice("Chose you method!",
choices=['sum', 'max', 'min', 'mean'])
if self.polygons is None:
self.polygons = load_geometry(**kwargs)
if kws is None:
kws = ['line', 'GL', 'duals']
objects = list(set([
x[5:] for x in
self.results.index.get_level_values('obj_label').unique()
if not any(y in x for y in kws)]))
reg_buses = list(set([
x[5:] for x in
self.results.index.get_level_values('bus_label').unique()
if not any(y in x for y in kws)]))
global_buses = list(set([
x for x in
self.results.index.get_level_values('bus_label').unique()
if 'GL' in x]))
buses = reg_buses + global_buses
if obj is None:
obj = easy.get_choice("What object do you want to plot?",
choices=objects)
if direction is None:
direction = easy.get_choice("From bus or to bus?",
choices=['from_bus', 'to_bus'])
if bus is None:
bus = easy.get_choice("Which bus?", choices=buses)
for r in self.polygons.index:
try:
tmp = pd.Series(self.results.loc[
'{0}_{1}'.format(r, bus), direction,
'{0}_{1}'.format(r, obj)]['val']).groupby(
level=0).agg(MTH[method])[0]
except KeyError:
tmp = float('nan')
self.polygons.loc[r, obj] = tmp
uv = unit_round(self.polygons[obj])
self.polygons[obj] = uv['series']
self.polygons[obj].prefix = uv['prefix']
self.polygons[obj].prefix_long = uv['prefix_long']
selection = {'obj': obj,
'direction': direction,
'bus': bus,
'method': method}
return selection
def add_power_lines(self, method=None, **kwargs):
if self.lines is None:
self.lines = load_geometry(region_column='name', **kwargs)
if self.plotter is None:
self.plotter = geoplot.GeoPlotter(
geoplot.postgis2shapely(self.lines.geom), (3, 16, 47, 56))
else:
self.plotter.geometries = geoplot.postgis2shapely(self.lines.geom)
if method is None:
method = easy.get_choice("Chose you method!",
choices=['sum', 'max', 'min', 'mean'])
for l in self.lines.index:
try:
r = l.split('-')
tmp = pd.Series()
tmp.set_value(1, self.results.loc[
'{0}_bus_el'.format(r[0]), 'from_bus',
'{0}_{1}_powerline'.format(*r)]['val'].groupby(
level=0).agg(MTH[method])[0])
tmp.set_value(2, self.results.loc[
'{0}_bus_el'.format(r[1]), 'from_bus',
'{1}_{0}_powerline'.format(*r)]['val'].groupby(
level=0).agg(MTH[method])[0])
self.lines.loc[l, 'trans'] = tmp.max()
except KeyError:
self.lines.loc[l, 'trans'] = 3000000
uv = unit_round(self.lines['trans'])
self.lines['trans'] = uv['series']
self.lines['trans'].prefix = uv['prefix']
self.lines['trans'].prefix_long = uv['prefix_long']
return method
def load_geometry(geometry_file=None, region_column='gid'):
if geometry_file is None:
geometry_file = easy.get_file_names()[0]
return pd.read_csv(geometry_file, index_col=region_column)
def show():
plt.tight_layout()
plt.box(on=None)
plt.show()
def unit_round(values, min_value=False):
longprefix = {0: '', 1: 'kilo', 2: 'Mega', 3: 'Giga', 4: 'Tera',
5: 'Exa', 6: 'Peta'}
shortprefix = {0: '', 1: 'k', 2: 'M', 3: 'G', 4: 'T',
5: 'E', 6: 'P'}
if min_value:
def_value = min(values)
a = 1
else:
def_value = max(values)
a = 0
if def_value > 0:
factor = int(int(math.log10(def_value)) / 3) + a
else:
factor = 0
values = round(values / 10 ** (factor * 3), 2)
return {'series': values, 'prefix': shortprefix[factor],
'prefix_long': longprefix[factor]}
def add_labels(data, plotter, label=None,
coord_file='data/geometries/coord_region.csv'):
p = pd.read_csv(coord_file, index_col='name')
data.polygons['point'] = p.point
for row in data.polygons.iterrows():
if 'point' not in row[1]:
point = geoplot.postgis2shapely([row[1].geom, ])[0].centroid
else:
point = geoplot.postgis2shapely([row[1].point, ])[0]
(x, y) = plotter.basemap(point.x, point.y)
if label is None:
text = row[0][2:]
else:
text = str(round(row[1][label], 1))
if row[1].normalised < 0.3 or row[1].normalised > 0.95:
textcolour = 'white'
else:
textcolour = 'black'
plotter.ax.text(x, y, text, color=textcolour, fontsize=12)
start_line = plotter.basemap(9.7, 53.4)
end_line = plotter.basemap(10.0, 53.55)
plt.plot([start_line[0], end_line[0]], [start_line[1], end_line[1]], '-',
color='white')
def polygon_plot(l_min=None, l_max=None, setname=None, myset=None, method=None,
filename=None):
geometry = 'data/geometries/polygons_de21_simple.csv'
sets = {
'load': {
'obj': 'load',
'direction': 'from_bus',
'bus': 'bus_el'},
'pv': {
'obj': 'solar',
'direction': 'to_bus',
'bus': 'bus_el'},
}
if setname is None and myset is None:
setname = easy.get_choice("What object do you want to plot?",
choices=tuple(sets.keys()))
if setname is not None:
myset = sets[setname]
if method is None:
myset['method'] = easy.get_choice(
"Chose you method!", choices=['sum', 'max', 'min', 'mean'])
else:
myset['method'] = method
s_data = SpatialData(filename)
myset = s_data.add_polygon_column(geometry_file=geometry, **myset)
if myset['method'] == 'sum':
unit = 'Wh'
else:
unit = 'W'
unit = "[{0}]".format(s_data.polygons[myset['obj']].prefix + unit)
plotter = geoplot.GeoPlotter(geoplot.postgis2shapely(s_data.polygons.geom),
(3, 16, 47, 56))
v_min = s_data.polygons[myset['obj']].min()
v_max = s_data.polygons[myset['obj']].max()
s_data.polygons['normalised'] = ((s_data.polygons[myset['obj']] - v_min) /
(v_max - v_min))
plotter.data = s_data.polygons['normalised']
plotter.plot(facecolor='data', edgecolor='white')
add_labels(s_data, plotter, myset['obj'])
if l_min is None:
l_min = v_min
if l_max is None:
l_max = v_max
plotter.draw_legend((l_min, l_max), number_ticks=3, legendlabel=unit,
location='bottom')
show()
def powerline_plot(l_min=None, l_max=None):
s_data = SpatialData()
reg = {
'geometry_file': 'data/geometries/polygons_de21_simple.csv'}
poly = geoplot.postgis2shapely(load_geometry(**reg).geom)
plotter = geoplot.GeoPlotter(poly, (3, 16, 47, 56))
method = s_data.add_power_lines(
geometry_file='data/geometries/lines_de21.csv')
plotter.plot(facecolor='grey', edgecolor='white')
if method == 'sum':
unit = 'Wh'
else:
unit = 'W'
unit = "[{0}]".format(s_data.lines['trans'].prefix + unit)
v_min = s_data.lines['trans'].min()
v_max = s_data.lines['trans'].max()
s_data.lines['normalised'] = ((s_data.lines['trans'] - v_min) /
(v_max - v_min))
plotter.geometries = geoplot.postgis2shapely(s_data.lines.geom)
plotter.data = s_data.lines['normalised']
my_cmap = LinearSegmentedColormap.from_list('mycmap', [(0, 'green'),
(0.5, 'yellow'),
(1, 'red')])
plotter.plot(edgecolor='data', linewidth=2, cmap=my_cmap)
if l_min is None:
l_min = v_min
if l_max is None:
l_max = v_max
plotter.draw_legend((l_min, l_max), number_ticks=3, cmap=my_cmap,
legendlabel=unit, location='right')
show()
def combined_plot():
s_data = SpatialData()
obj = s_data.add_polygon_column(
obj='load', direction='from_bus', bus='bus_el', method='sum',
geometry_file='geometries/polygons_de21_simple.csv')
s_data.add_power_lines(
geometry_file='geometries/lines_de21.csv')
unit = s_data.polygons[obj].prefix_long
plotter = geoplot.GeoPlotter(geoplot.postgis2shapely(s_data.polygons.geom),
(3, 16, 47, 56))
v_min = s_data.polygons[obj].min()
v_max = s_data.polygons[obj].max()
s_data.polygons['normalised'] = ((s_data.polygons[obj] - v_min) /
(v_max - v_min))
plotter.data = s_data.polygons['normalised']
plotter.plot(facecolor='data', edgecolor='white')
plotter.draw_legend((v_min, v_max), number_ticks=3, legendlabel=unit,
location='bottom')
unit = s_data.lines['trans'].prefix_long
v_min = s_data.lines['trans'].min()
v_max = s_data.lines['trans'].max()
s_data.lines['normalised'] = ((s_data.lines['trans'] - v_min) /
(v_max - v_min))
plotter.geometries = geoplot.postgis2shapely(s_data.lines.geom)
plotter.data = s_data.lines['normalised']
my_cmap = LinearSegmentedColormap.from_list('mycmap', [(0, 'green'),
(0.5, 'yellow'),
(1, 'red')])
plotter.plot(edgecolor='data', linewidth=2, cmap=my_cmap)
plotter.draw_legend((v_min, v_max), number_ticks=3,
legendlabel=unit, location='right')
show()
if __name__ == "__main__":
# resf = ('/home/uwe/git_local/reegis-hp/reegis_hp/de21/results' +
# '/scenario_reegis_de_21_test_2017-01-03 11:31:10.600830_' +
# 'results_complete.csv')
# choice = 'polygons'
choice = easy.get_choice(
"What geometry do you want to plot?", choices=['lines', 'polygons'])
if choice == 'polygons':
polygon_plot(l_min=0)
elif choice == 'lines':
powerline_plot()
else:
print("End!")
| gpl-3.0 |
rahuldhote/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
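    Examples
    --------
    A minimal usage sketch (array sizes and parameter values below are
    arbitrary, chosen only for illustration)::

        import numpy as np
        from sklearn.kernel_approximation import RBFSampler

        X = np.random.RandomState(0).rand(5, 3)
        rbf_feature = RBFSampler(gamma=1.0, n_components=20, random_state=0)
        X_features = rbf_feature.fit_transform(X)
        # X_features has shape (5, 20): one column per random Fourier feature.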
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
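    Examples
    --------
    A minimal usage sketch (values below are arbitrary; the input must be
    non-negative)::

        import numpy as np
        from sklearn.kernel_approximation import SkewedChi2Sampler

        X = np.random.RandomState(0).rand(5, 3)
        chi2_feature = SkewedChi2Sampler(skewedness=0.01, n_components=10,
                                         random_state=0)
        X_features = chi2_feature.fit_transform(X)
        # X_features has shape (5, 10).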
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Uses sampling the fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
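    Examples
    --------
    A minimal usage sketch (illustrative only; ``X`` is toy non-negative data,
    the import path assumes the standard ``sklearn.kernel_approximation``
    module, and only the output shape is shown):
    >>> import numpy as np
    >>> from sklearn.kernel_approximation import AdditiveChi2Sampler
    >>> X = np.random.RandomState(0).uniform(size=(4, 3))
    >>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
    >>> X_transformed = chi2sampler.fit_transform(X)
    >>> X_transformed.shape  # n_features * (2 * sample_steps - 1)
    (4, 9)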
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
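        # Reading of the transform below (cf. the Vedaldi & Zisserman
        # reference): for each non-zero entry x and sampling interval
        # L = sample_interval_, the features computed are
        #   phi_0(x)      = sqrt(x * L)
        #   phi_{2j-1}(x) = sqrt(2 * x * L / cosh(pi * j * L)) * cos(j * L * log(x))
        #   phi_{2j}(x)   = sqrt(2 * x * L / cosh(pi * j * L)) * sin(j * L * log(x))
        # for j = 1, ..., sample_steps - 1, i.e. 2*sample_steps - 1 features
        # per input feature.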
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
        Inverse of the square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
      Advances in Neural Information Processing Systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
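    Examples
    --------
    A minimal usage sketch (illustrative only; ``X`` is random toy data, the
    import path assumes the standard ``sklearn.kernel_approximation`` module,
    and the kernel/``n_components`` choices are arbitrary, so only the output
    shape is shown):
    >>> import numpy as np
    >>> from sklearn.kernel_approximation import Nystroem
    >>> X = np.random.RandomState(0).normal(size=(60, 5))
    >>> feature_map = Nystroem(kernel="rbf", gamma=0.2,
    ...                        n_components=20, random_state=0)
    >>> X_transformed = feature_map.fit_transform(X)
    >>> X_transformed.shape
    (60, 20)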
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
        X : array-like, shape=(n_samples, n_features)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
        # inverse square root of (regularized) kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
| bsd-3-clause |
sbvickers/SEDclient | sedclient/sedPlot.py | 1 | 3213 |
import matplotlib.pyplot as plt
import dataStruct as ds
import globs
class Plot:
"""
    Class for building SEDs (spectral energy distributions).
"""
def __init__(self):
"""
"""
self.name = globs.name
self.ra = globs.ra
self.dec = globs.dec
self.y_ann = 0.97
self.photo = ds.buildPhStruct()
self.spec = ds.buildSpStruct()
self.fig = plt.figure()
self.ax = plt.subplot(111)
self.ax.set_xlabel(r'$\lambda\, \left[ \mu\rm{m} \right]$', fontsize=16)
self.ax.set_ylabel(r'$\lambda F_\lambda\,\left[ \rm{erg\,\,s}^{-1}\,\rm{cm}^{-2} \right]$', fontsize=16)
self.ax.set_xscale('log')
self.ax.set_yscale('log')
self.ax.set_xlim([0.1, 1000.0])
self.ax.set_xticklabels(['', '$0.1$', '$1$', '$10$', '$100$', '$1000$'])
def plotPh(self):
"""
Plots the photometry.
"""
import configparser
for survey in self.photo:
conf = configparser.ConfigParser()
conf.read("{}{}.ini".format(globs.confPath, survey))
conf = conf['plot']
if self.photo[survey]['flux']:
wave = self.photo[survey]['wave']
flux = [f for x in self.photo[survey]['flux'] for f in (x.value.n, x.value.s)]
if 'white' not in conf['mfc']:
self.ax.errorbar(wave, flux[0::2], yerr=flux[1::2], fmt=conf['marker'], mfc=conf['mfc'], mec=conf['mec'], ecolor=conf['mfc'], label=conf['label'])
else:
self.ax.errorbar(wave, flux[0::2], yerr=flux[1::2], fmt=conf['marker'], mfc=conf['mfc'], mec=conf['mec'], ecolor=conf['mec'], label=conf['label'])
def plotSp(self):
"""
Plots the spectroscopy.
"""
import configparser
for survey in self.spec:
conf = configparser.ConfigParser()
conf.read("{}spec/{}.ini".format(globs.confPath, survey))
conf = conf['plot']
if self.spec[survey]['flux']:
wave = self.spec[survey]['wave']
flux = [f for x in self.spec[survey]['flux'] for f in (x.value.n, x.value.s)]
self.ax.plot(wave, flux[0::2], ls=conf['ls'], color=conf['col'], lw=float(conf['lw']), label=conf['label'])
def annotate(self, string):
"""
        Annotates the given string in the top-right corner, moving down after each call.
"""
self.ax.text(0.97, self.y_ann, string, fontsize=14, transform=self.ax.transAxes, horizontalalignment='right', verticalalignment='top')
self.y_ann -= 0.05
def legend(self):
"""
Displays the legend.
"""
self.ax.legend(loc='lower right', bbox_to_anchor=(0.99, 0.01), fancybox=False, shadow=False, ncol=4, numpoints=1, prop={'size':6.5}, fontsize=14)
def saveSed(self, filename=None):
"""
        Saves the SED plot using the dataSave.py module.
"""
        # local import renamed so it does not shadow the module-level
        # ``dataStruct as ds`` import
        import dataSave as dsave
        if filename:
            dsave.saveSed(self.fig, filename)
        else:
            dsave.saveSed(self.fig)
def show(self):
"""
Shows the figure.
"""
plt.show()
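if __name__ == '__main__':
    # Minimal, hypothetical usage sketch (not part of the original module).
    # It assumes globs.name / globs.ra / globs.dec and globs.confPath have
    # already been configured, that the per-survey .ini plot configuration
    # files exist under globs.confPath, and that the photometry/spectroscopy
    # structures returned by dataStruct have been filled elsewhere in the
    # pipeline before plotting.
    sed = Plot()
    sed.plotPh()            # photometric points, one marker style per survey
    sed.plotSp()            # spectroscopic curves
    sed.annotate(sed.name)  # object name in the top-right corner
    sed.legend()
    sed.show()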
| bsd-2-clause |
pnedunuri/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
voxlol/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
huzq/scikit-learn | examples/linear_model/plot_ols_3d.py | 23 | 2001 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
X, y = datasets.load_diabetes(return_X_y=True)
indices = (0, 1)
X_train = X[:-20, indices]
X_test = X[-20:, indices]
y_train = y[:-20]
y_test = y[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
# #############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
# Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
Eric89GXL/scikit-learn | sklearn/utils/tests/test_validation.py | 8 | 6906 | """Tests for input validation functions"""
from tempfile import NamedTemporaryFile
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils import (array2d, as_float_array, atleast2d_or_csr,
atleast2d_or_csc, check_arrays, safe_asarray)
from sklearn.random_projection import sparse_random_matrix
def test_safe_asarray():
"""Test that array dtype conversion works."""
# Test with sparse arrays
X = sp.csc_matrix(np.arange(4, dtype=np.float))
Y = safe_asarray(X)
assert_true(Y.dtype == np.float)
# Check that no copy has been performed
Y.data[0] = 7 # value not in original array
assert_equal(X.data[0], Y.data[0])
Y = safe_asarray(X, dtype=np.int)
assert_equal(Y.data.dtype, np.int)
# Test with dense arrays
X = np.arange(4, dtype=np.float)
Y = safe_asarray(X)
assert_true(Y.dtype == np.float)
# Check that no copy has been performed
Y[0] = 7
assert_equal(X[0], Y[0])
Y = safe_asarray(X, dtype=np.int)
assert_equal(Y.dtype, np.int)
def test_as_float_array():
"""Test function for as_float_array"""
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).todense(),
sparse_random_matrix(10, 10, density=0.10).todense()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_check_arrays_exceptions():
"""Check that invalid arguments raise appropriate exceptions"""
assert_raises(ValueError, check_arrays, [0], [0, 1])
assert_raises(TypeError, check_arrays, 0, [0, 1])
assert_raises(TypeError, check_arrays, [0], 0)
assert_raises(TypeError, check_arrays, [0, 1], [0, 1], meaning_of_life=42)
assert_raises(ValueError, check_arrays, [0], [0], sparse_format='fake')
def test_np_matrix():
"""Confirm that input validation code does not return np.matrix"""
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
assert_false(isinstance(atleast2d_or_csr(X), np.matrix))
assert_false(isinstance(atleast2d_or_csr(np.matrix(X)), np.matrix))
assert_false(isinstance(atleast2d_or_csr(sp.csc_matrix(X)), np.matrix))
assert_false(isinstance(atleast2d_or_csc(X), np.matrix))
assert_false(isinstance(atleast2d_or_csc(np.matrix(X)), np.matrix))
assert_false(isinstance(atleast2d_or_csc(sp.csr_matrix(X)), np.matrix))
assert_false(isinstance(safe_asarray(X), np.matrix))
assert_false(isinstance(safe_asarray(np.matrix(X)), np.matrix))
assert_false(isinstance(safe_asarray(sp.lil_matrix(X)), np.matrix))
assert_true(atleast2d_or_csr(X, copy=False) is X)
assert_false(atleast2d_or_csr(X, copy=True) is X)
assert_true(atleast2d_or_csc(X, copy=False) is X)
assert_false(atleast2d_or_csc(X, copy=True) is X)
def test_memmap():
"""Confirm that input validation code doesn't copy memory mapped arrays"""
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (array2d, np.asarray, asflt, safe_asarray):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
"""Check that ordering is enforced correctly by validation utilities.
We need to check each validation utility, because a 'copy' without
'order=K' will kill the ordering.
"""
X = np.ones((10, 5))
for A in X, X.T:
for validator in (array2d, atleast2d_or_csr, atleast2d_or_csc):
for copy in (True, False):
B = validator(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = validator(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
for validator in (atleast2d_or_csc, atleast2d_or_csr):
for copy in (True, False):
Y = validator(X, copy=copy, order='C')
assert_true(Y.data.flags['C_CONTIGUOUS'])
def test_check_arrays():
# check that error is raised on different length inputs
X = [0, 1]
Y = np.arange(3)
assert_raises(ValueError, check_arrays, X, Y)
# check error for sparse matrix and array
X = sp.csc_matrix(np.arange(4))
assert_raises(ValueError, check_arrays, X, Y)
    # check the y=None pattern
X = [0, 1, 2]
X_, Y_, Z_ = check_arrays(X, Y, None)
assert_true(Z_ is None)
# check that lists are converted
X_, Y_ = check_arrays(X, Y)
assert_true(isinstance(X_, np.ndarray))
assert_true(isinstance(Y_, np.ndarray))
# check that Y was not copied:
assert_true(Y_ is Y)
# check copying
X_, Y_ = check_arrays(X, Y, copy=True)
assert_false(Y_ is Y)
# check forcing dtype
X_, Y_ = check_arrays(X, Y, dtype=np.int)
assert_equal(X_.dtype, np.int)
assert_equal(Y_.dtype, np.int)
X_, Y_ = check_arrays(X, Y, dtype=np.float)
assert_equal(X_.dtype, np.float)
assert_equal(Y_.dtype, np.float)
# test check_ccontiguous
Y = np.arange(6).reshape(3, 2).copy('F')
# if we don't specify it, it is not changed
X_, Y_ = check_arrays(X, Y)
assert_true(Y_.flags['F_CONTIGUOUS'])
assert_false(Y_.flags['C_CONTIGUOUS'])
X_, Y_ = check_arrays(X, Y, check_ccontiguous=True)
assert_true(Y_.flags['C_CONTIGUOUS'])
assert_false(Y_.flags['F_CONTIGUOUS'])
# check that lists are passed through if allow_lists is true
X_, Y_ = check_arrays(X, Y, allow_lists=True)
assert_true(isinstance(X_, list))
| bsd-3-clause |
googleapis/python-automl | tests/unit/test_tables_client_v1beta1.py | 1 | 70305 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pandas
import pytest
from google.api_core import exceptions
from google.auth.credentials import AnonymousCredentials
from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.types import data_types, data_items
from google.protobuf import struct_pb2 as struct
PROJECT = "project"
REGION = "region"
LOCATION_PATH = "projects/{}/locations/{}".format(PROJECT, REGION)
class TestTablesClient(object):
def tables_client(
self, client_attrs={}, prediction_client_attrs={}, gcs_client_attrs={}
):
client_mock = mock.Mock(**client_attrs)
prediction_client_mock = mock.Mock(**prediction_client_attrs)
gcs_client_mock = mock.Mock(**gcs_client_attrs)
return automl_v1beta1.TablesClient(
client=client_mock,
prediction_client=prediction_client_mock,
gcs_client=gcs_client_mock,
project=PROJECT,
region=REGION,
)
def test_list_datasets_empty(self):
client = self.tables_client(
client_attrs={
"list_datasets.return_value": [],
"location_path.return_value": LOCATION_PATH,
},
prediction_client_attrs={},
)
ds = client.list_datasets()
request = automl_v1beta1.ListDatasetsRequest(parent=LOCATION_PATH)
client.auto_ml_client.list_datasets.assert_called_with(request=request)
assert ds == []
def test_list_datasets_not_empty(self):
datasets = ["some_dataset"]
client = self.tables_client(
client_attrs={
"list_datasets.return_value": datasets,
"location_path.return_value": LOCATION_PATH,
},
prediction_client_attrs={},
)
ds = client.list_datasets()
request = automl_v1beta1.ListDatasetsRequest(parent=LOCATION_PATH)
client.auto_ml_client.list_datasets.assert_called_with(request=request)
assert len(ds) == 1
assert ds[0] == "some_dataset"
def test_get_dataset_no_value(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.get_dataset()
client.auto_ml_client.get_dataset.assert_not_called()
def test_get_dataset_name(self):
dataset_actual = "dataset"
client = self.tables_client({"get_dataset.return_value": dataset_actual}, {})
dataset = client.get_dataset(dataset_name="my_dataset")
client.auto_ml_client.get_dataset.assert_called_with(
request=automl_v1beta1.GetDatasetRequest(name="my_dataset")
)
assert dataset == dataset_actual
def test_get_no_dataset(self):
client = self.tables_client(
{"get_dataset.side_effect": exceptions.NotFound("err")}, {}
)
with pytest.raises(exceptions.NotFound):
client.get_dataset(dataset_name="my_dataset")
client.auto_ml_client.get_dataset.assert_called_with(
request=automl_v1beta1.GetDatasetRequest(name="my_dataset")
)
def test_get_dataset_from_empty_list(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.get_dataset(dataset_display_name="my_dataset")
def test_get_dataset_from_list_not_found(self):
client = self.tables_client(
{"list_datasets.return_value": [mock.Mock(display_name="not_it")]}, {}
)
with pytest.raises(exceptions.NotFound):
client.get_dataset(dataset_display_name="my_dataset")
def test_get_dataset_from_list(self):
client = self.tables_client(
{
"list_datasets.return_value": [
mock.Mock(display_name="not_it"),
mock.Mock(display_name="my_dataset"),
]
},
{},
)
dataset = client.get_dataset(dataset_display_name="my_dataset")
assert dataset.display_name == "my_dataset"
def test_get_dataset_from_list_ambiguous(self):
client = self.tables_client(
{
"list_datasets.return_value": [
mock.Mock(display_name="my_dataset"),
mock.Mock(display_name="not_my_dataset"),
mock.Mock(display_name="my_dataset"),
]
},
{},
)
with pytest.raises(ValueError):
client.get_dataset(dataset_display_name="my_dataset")
def test_create_dataset(self):
client = self.tables_client(
{
"location_path.return_value": LOCATION_PATH,
"create_dataset.return_value": mock.Mock(display_name="name"),
},
{},
)
metadata = {"primary_table_spec_id": "1234"}
dataset = client.create_dataset("name", metadata=metadata)
client.auto_ml_client.create_dataset.assert_called_with(
request=automl_v1beta1.CreateDatasetRequest(
parent=LOCATION_PATH,
dataset={"display_name": "name", "tables_dataset_metadata": metadata},
)
)
assert dataset.display_name == "name"
def test_delete_dataset(self):
dataset = mock.Mock()
dataset.configure_mock(name="name")
client = self.tables_client({"delete_dataset.return_value": None}, {})
client.delete_dataset(dataset=dataset)
client.auto_ml_client.delete_dataset.assert_called_with(
request=automl_v1beta1.DeleteDatasetRequest(name="name")
)
def test_delete_dataset_not_found(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
client.delete_dataset(dataset_display_name="not_found")
client.auto_ml_client.delete_dataset.assert_not_called()
def test_delete_dataset_name(self):
client = self.tables_client({"delete_dataset.return_value": None}, {})
client.delete_dataset(dataset_name="name")
client.auto_ml_client.delete_dataset.assert_called_with(
request=automl_v1beta1.DeleteDatasetRequest(name="name")
)
def test_export_not_found(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.export_data(dataset_display_name="name", gcs_input_uris="uri")
client.auto_ml_client.export_data.assert_not_called()
def test_export_gcs_uri(self):
client = self.tables_client({"export_data.return_value": None}, {})
client.export_data(dataset_name="name", gcs_output_uri_prefix="uri")
client.auto_ml_client.export_data.assert_called_with(
request=automl_v1beta1.ExportDataRequest(
name="name",
output_config={"gcs_destination": {"output_uri_prefix": "uri"}},
)
)
def test_export_bq_uri(self):
client = self.tables_client({"export_data.return_value": None}, {})
client.export_data(dataset_name="name", bigquery_output_uri="uri")
client.auto_ml_client.export_data.assert_called_with(
request=automl_v1beta1.ExportDataRequest(
name="name",
output_config={"bigquery_destination": {"output_uri": "uri"}},
)
)
def test_import_not_found(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.import_data(dataset_display_name="name", gcs_input_uris="uri")
client.auto_ml_client.import_data.assert_not_called()
def test_import_pandas_dataframe(self):
client = self.tables_client(
gcs_client_attrs={
"bucket_name": "my_bucket",
"upload_pandas_dataframe.return_value": "uri",
}
)
dataframe = pandas.DataFrame({})
client.import_data(
project=PROJECT,
region=REGION,
dataset_name="name",
pandas_dataframe=dataframe,
)
client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
client.auto_ml_client.import_data.assert_called_with(
request=automl_v1beta1.ImportDataRequest(
name="name", input_config={"gcs_source": {"input_uris": ["uri"]}}
)
)
def test_import_pandas_dataframe_init_gcs(self):
client = automl_v1beta1.TablesClient(
client=mock.Mock(),
prediction_client=mock.Mock(),
project=PROJECT,
region=REGION,
credentials=AnonymousCredentials(),
)
dataframe = pandas.DataFrame({})
patch = mock.patch(
"google.cloud.automl_v1beta1.services.tables.tables_client.gcs_client.GcsClient",
bucket_name="my_bucket",
)
with patch as MockGcsClient:
mockInstance = MockGcsClient.return_value
mockInstance.upload_pandas_dataframe.return_value = "uri"
client.import_data(dataset_name="name", pandas_dataframe=dataframe)
assert client.gcs_client is mockInstance
client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
client.auto_ml_client.import_data.assert_called_with(
request=automl_v1beta1.ImportDataRequest(
name="name", input_config={"gcs_source": {"input_uris": ["uri"]}}
)
)
def test_import_gcs_uri(self):
client = self.tables_client({"import_data.return_value": None}, {})
client.import_data(dataset_name="name", gcs_input_uris="uri")
client.auto_ml_client.import_data.assert_called_with(
request=automl_v1beta1.ImportDataRequest(
name="name", input_config={"gcs_source": {"input_uris": ["uri"]}}
)
)
def test_import_gcs_uris(self):
client = self.tables_client({"import_data.return_value": None}, {})
client.import_data(dataset_name="name", gcs_input_uris=["uri", "uri"])
client.auto_ml_client.import_data.assert_called_with(
request=automl_v1beta1.ImportDataRequest(
name="name", input_config={"gcs_source": {"input_uris": ["uri", "uri"]}}
)
)
def test_import_bq_uri(self):
client = self.tables_client({"import_data.return_value": None}, {})
client.import_data(dataset_name="name", bigquery_input_uri="uri")
client.auto_ml_client.import_data.assert_called_with(
request=automl_v1beta1.ImportDataRequest(
name="name", input_config={"bigquery_source": {"input_uri": "uri"}}
)
)
def test_list_table_specs(self):
client = self.tables_client({"list_table_specs.return_value": None}, {})
client.list_table_specs(dataset_name="name")
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
def test_list_table_specs_not_found(self):
client = self.tables_client(
{"list_table_specs.side_effect": exceptions.NotFound("not found")}, {}
)
with pytest.raises(exceptions.NotFound):
client.list_table_specs(dataset_name="name")
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
def test_get_table_spec(self):
client = self.tables_client({}, {})
client.get_table_spec("name")
client.auto_ml_client.get_table_spec.assert_called_with(
request=automl_v1beta1.GetTableSpecRequest(name="name")
)
def test_get_column_spec(self):
client = self.tables_client({}, {})
client.get_column_spec("name")
client.auto_ml_client.get_column_spec.assert_called_with(
request=automl_v1beta1.GetColumnSpecRequest(name="name")
)
def test_list_column_specs(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [],
},
{},
)
client.list_column_specs(dataset_name="name")
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
def test_update_column_spec_not_found(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec = automl_v1beta1.ColumnSpec(
name="column",
display_name="column",
data_type=automl_v1beta1.DataType(type_code=automl_v1beta1.TypeCode.STRING),
)
client = self.tables_client(
client_attrs={
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec],
},
prediction_client_attrs={},
)
with pytest.raises(exceptions.NotFound):
client.update_column_spec(dataset_name="name", column_spec_name="column2")
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_column_spec.assert_not_called()
def test_update_column_spec_display_name_not_found(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec = automl_v1beta1.ColumnSpec(
name="column",
display_name="column",
data_type=automl_v1beta1.DataType(type_code=automl_v1beta1.TypeCode.STRING),
)
client = self.tables_client(
client_attrs={
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec],
},
prediction_client_attrs={},
)
with pytest.raises(exceptions.NotFound):
client.update_column_spec(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_column_spec.assert_not_called()
def test_update_column_spec_name_no_args(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec = automl_v1beta1.ColumnSpec(
name="column/2",
display_name="column",
data_type=automl_v1beta1.DataType(
type_code=automl_v1beta1.TypeCode.FLOAT64
),
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec],
},
{},
)
client.update_column_spec(dataset_name="name", column_spec_name="column/2")
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_column_spec.assert_called_with(
request=automl_v1beta1.UpdateColumnSpecRequest(
column_spec={
"name": "column/2",
"data_type": {"type_code": automl_v1beta1.TypeCode.FLOAT64},
}
)
)
def test_update_column_spec_no_args(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec = automl_v1beta1.ColumnSpec(
name="column",
display_name="column",
data_type=automl_v1beta1.DataType(
type_code=automl_v1beta1.TypeCode.FLOAT64
),
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec],
},
{},
)
client.update_column_spec(
dataset_name="name", column_spec_display_name="column"
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_column_spec.assert_called_with(
request=automl_v1beta1.UpdateColumnSpecRequest(
column_spec={
"name": "column",
"data_type": {"type_code": automl_v1beta1.TypeCode.FLOAT64},
}
)
)
def test_update_column_spec_nullable(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec = automl_v1beta1.ColumnSpec(
name="column",
display_name="column",
data_type=automl_v1beta1.DataType(
type_code=automl_v1beta1.TypeCode.FLOAT64
),
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec],
},
{},
)
client.update_column_spec(
dataset_name="name", column_spec_display_name="column", nullable=True
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_column_spec.assert_called_with(
request=automl_v1beta1.UpdateColumnSpecRequest(
column_spec={
"name": "column",
"data_type": {
"type_code": automl_v1beta1.TypeCode.FLOAT64,
"nullable": True,
},
}
)
)
def test_update_column_spec_type_code(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec = automl_v1beta1.ColumnSpec(
name="column",
display_name="column",
data_type=automl_v1beta1.DataType(
type_code=automl_v1beta1.TypeCode.FLOAT64
),
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec],
},
{},
)
client.update_column_spec(
dataset_name="name",
column_spec_display_name="column",
type_code=automl_v1beta1.TypeCode.ARRAY,
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_column_spec.assert_called_with(
request=automl_v1beta1.UpdateColumnSpecRequest(
column_spec={
"name": "column",
"data_type": {"type_code": automl_v1beta1.TypeCode.ARRAY},
}
)
)
def test_update_column_spec_type_code_nullable(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec = automl_v1beta1.ColumnSpec(
name="column",
display_name="column",
data_type=automl_v1beta1.DataType(
type_code=automl_v1beta1.TypeCode.FLOAT64
),
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec],
},
{},
)
client.update_column_spec(
dataset_name="name",
nullable=True,
column_spec_display_name="column",
type_code=automl_v1beta1.TypeCode.ARRAY,
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_column_spec.assert_called_with(
request=automl_v1beta1.UpdateColumnSpecRequest(
column_spec={
"name": "column",
"data_type": {
"type_code": automl_v1beta1.TypeCode.ARRAY,
"nullable": True,
},
}
)
)
def test_update_column_spec_type_code_nullable_false(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec = automl_v1beta1.ColumnSpec(
name="column",
display_name="column",
data_type=automl_v1beta1.DataType(
type_code=automl_v1beta1.TypeCode.FLOAT64
),
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec],
},
{},
)
client.update_column_spec(
dataset_name="name",
nullable=False,
column_spec_display_name="column",
type_code=automl_v1beta1.TypeCode.FLOAT64,
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_column_spec.assert_called_with(
request=automl_v1beta1.UpdateColumnSpecRequest(
column_spec={
"name": "column",
"data_type": {
"type_code": automl_v1beta1.TypeCode.FLOAT64,
"nullable": False,
},
}
)
)
def test_set_target_column_table_not_found(self):
client = self.tables_client(
{"list_table_specs.side_effect": exceptions.NotFound("err")}, {}
)
with pytest.raises(exceptions.NotFound):
client.set_target_column(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_not_called()
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_target_column_not_found(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/1", display_name="column")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
with pytest.raises(exceptions.NotFound):
client.set_target_column(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_target_column(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/1", display_name="column")
dataset_mock = mock.Mock()
tables_dataset_metadata_mock = mock.Mock()
tables_dataset_metadata_mock.configure_mock(
target_column_spec_id="2",
weight_column_spec_id="2",
ml_use_column_spec_id="3",
)
dataset_mock.configure_mock(
name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
)
client = self.tables_client(
{
"get_dataset.return_value": dataset_mock,
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.set_target_column(dataset_name="name", column_spec_display_name="column")
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_dataset.assert_called_with(
request=automl_v1beta1.UpdateDatasetRequest(
dataset={
"name": "dataset",
"tables_dataset_metadata": {
"target_column_spec_id": "1",
"weight_column_spec_id": "2",
"ml_use_column_spec_id": "3",
},
}
)
)
def test_set_weight_column_table_not_found(self):
client = self.tables_client(
{"list_table_specs.side_effect": exceptions.NotFound("err")}, {}
)
        with pytest.raises(exceptions.NotFound):
            client.set_weight_column(
                dataset_name="name", column_spec_display_name="column2"
            )
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_not_called()
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_weight_column_not_found(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/1", display_name="column")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
with pytest.raises(exceptions.NotFound):
client.set_weight_column(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_weight_column(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/2", display_name="column")
dataset_mock = mock.Mock()
tables_dataset_metadata_mock = mock.Mock()
tables_dataset_metadata_mock.configure_mock(
target_column_spec_id="1",
weight_column_spec_id="1",
ml_use_column_spec_id="3",
)
dataset_mock.configure_mock(
name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
)
client = self.tables_client(
{
"get_dataset.return_value": dataset_mock,
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.set_weight_column(dataset_name="name", column_spec_display_name="column")
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_dataset.assert_called_with(
request=automl_v1beta1.UpdateDatasetRequest(
dataset={
"name": "dataset",
"tables_dataset_metadata": {
"target_column_spec_id": "1",
"weight_column_spec_id": "2",
"ml_use_column_spec_id": "3",
},
}
)
)
def test_clear_weight_column(self):
dataset_mock = mock.Mock()
tables_dataset_metadata_mock = mock.Mock()
tables_dataset_metadata_mock.configure_mock(
target_column_spec_id="1",
weight_column_spec_id="2",
ml_use_column_spec_id="3",
)
dataset_mock.configure_mock(
name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
)
client = self.tables_client({"get_dataset.return_value": dataset_mock}, {})
client.clear_weight_column(dataset_name="name")
client.auto_ml_client.update_dataset.assert_called_with(
request=automl_v1beta1.UpdateDatasetRequest(
dataset={
"name": "dataset",
"tables_dataset_metadata": {
"target_column_spec_id": "1",
"weight_column_spec_id": None,
"ml_use_column_spec_id": "3",
},
}
)
)
def test_set_test_train_column_table_not_found(self):
client = self.tables_client(
{"list_table_specs.side_effect": exceptions.NotFound("err")}, {}
)
with pytest.raises(exceptions.NotFound):
client.set_test_train_column(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_not_called()
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_test_train_column_not_found(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/1", display_name="column")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
with pytest.raises(exceptions.NotFound):
client.set_test_train_column(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_test_train_column(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/3", display_name="column")
dataset_mock = mock.Mock()
tables_dataset_metadata_mock = mock.Mock()
tables_dataset_metadata_mock.configure_mock(
target_column_spec_id="1",
weight_column_spec_id="2",
ml_use_column_spec_id="2",
)
dataset_mock.configure_mock(
name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
)
client = self.tables_client(
{
"get_dataset.return_value": dataset_mock,
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.set_test_train_column(
dataset_name="name", column_spec_display_name="column"
)
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_dataset.assert_called_with(
request=automl_v1beta1.UpdateDatasetRequest(
dataset={
"name": "dataset",
"tables_dataset_metadata": {
"target_column_spec_id": "1",
"weight_column_spec_id": "2",
"ml_use_column_spec_id": "3",
},
}
)
)
def test_clear_test_train_column(self):
dataset_mock = mock.Mock()
tables_dataset_metadata_mock = mock.Mock()
tables_dataset_metadata_mock.configure_mock(
target_column_spec_id="1",
weight_column_spec_id="2",
ml_use_column_spec_id="2",
)
dataset_mock.configure_mock(
name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
)
client = self.tables_client({"get_dataset.return_value": dataset_mock}, {})
client.clear_test_train_column(dataset_name="name")
client.auto_ml_client.update_dataset.assert_called_with(
request=automl_v1beta1.UpdateDatasetRequest(
dataset={
"name": "dataset",
"tables_dataset_metadata": {
"target_column_spec_id": "1",
"weight_column_spec_id": "2",
"ml_use_column_spec_id": None,
},
}
)
)
def test_set_time_column(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/3", display_name="column")
dataset_mock = mock.Mock()
dataset_mock.configure_mock(name="dataset")
client = self.tables_client(
{
"get_dataset.return_value": dataset_mock,
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.set_time_column(dataset_name="name", column_spec_display_name="column")
client.auto_ml_client.list_table_specs.assert_called_with(
request=automl_v1beta1.ListTableSpecsRequest(parent="name")
)
client.auto_ml_client.list_column_specs.assert_called_with(
request=automl_v1beta1.ListColumnSpecsRequest(parent="table")
)
client.auto_ml_client.update_table_spec.assert_called_with(
request=automl_v1beta1.UpdateTableSpecRequest(
table_spec={"name": "table", "time_column_spec_id": "3"}
)
)
def test_clear_time_column(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
dataset_mock = mock.Mock()
dataset_mock.configure_mock(name="dataset")
client = self.tables_client(
{
"get_dataset.return_value": dataset_mock,
"list_table_specs.return_value": [table_spec_mock],
},
{},
)
client.clear_time_column(dataset_name="name")
client.auto_ml_client.update_table_spec.assert_called_with(
request=automl_v1beta1.UpdateTableSpecRequest(
table_spec={"name": "table", "time_column_spec_id": None}
)
)
def test_get_model_evaluation(self):
client = self.tables_client({}, {})
client.get_model_evaluation(model_evaluation_name="x")
client.auto_ml_client.get_model_evaluation.assert_called_with(
request=automl_v1beta1.GetModelEvaluationRequest(name="x")
)
def test_list_model_evaluations_empty(self):
client = self.tables_client({"list_model_evaluations.return_value": []}, {})
ds = client.list_model_evaluations(model_name="model")
client.auto_ml_client.list_model_evaluations.assert_called_with(
request=automl_v1beta1.ListModelEvaluationsRequest(parent="model")
)
assert ds == []
def test_list_model_evaluations_not_empty(self):
evaluations = ["eval"]
client = self.tables_client(
{
"list_model_evaluations.return_value": evaluations,
"location_path.return_value": LOCATION_PATH,
},
{},
)
ds = client.list_model_evaluations(model_name="model")
client.auto_ml_client.list_model_evaluations.assert_called_with(
request=automl_v1beta1.ListModelEvaluationsRequest(parent="model")
)
assert len(ds) == 1
assert ds[0] == "eval"
def test_list_models_empty(self):
client = self.tables_client(
{
"list_models.return_value": [],
"location_path.return_value": LOCATION_PATH,
},
{},
)
ds = client.list_models()
client.auto_ml_client.list_models.assert_called_with(
request=automl_v1beta1.ListModelsRequest(parent=LOCATION_PATH)
)
assert ds == []
def test_list_models_not_empty(self):
models = ["some_model"]
client = self.tables_client(
{
"list_models.return_value": models,
"location_path.return_value": LOCATION_PATH,
},
{},
)
ds = client.list_models()
client.auto_ml_client.list_models.assert_called_with(
request=automl_v1beta1.ListModelsRequest(parent=LOCATION_PATH)
)
assert len(ds) == 1
assert ds[0] == "some_model"
def test_get_model_name(self):
model_actual = "model"
client = self.tables_client({"get_model.return_value": model_actual}, {})
model = client.get_model(model_name="my_model")
client.auto_ml_client.get_model.assert_called_with(name="my_model")
assert model == model_actual
def test_get_no_model(self):
client = self.tables_client(
{"get_model.side_effect": exceptions.NotFound("err")}, {}
)
with pytest.raises(exceptions.NotFound):
client.get_model(model_name="my_model")
client.auto_ml_client.get_model.assert_called_with(name="my_model")
def test_get_model_from_empty_list(self):
client = self.tables_client({"list_models.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.get_model(model_display_name="my_model")
def test_get_model_from_list_not_found(self):
client = self.tables_client(
{"list_models.return_value": [mock.Mock(display_name="not_it")]}, {}
)
with pytest.raises(exceptions.NotFound):
client.get_model(model_display_name="my_model")
def test_get_model_from_list(self):
client = self.tables_client(
{
"list_models.return_value": [
mock.Mock(display_name="not_it"),
mock.Mock(display_name="my_model"),
]
},
{},
)
model = client.get_model(model_display_name="my_model")
assert model.display_name == "my_model"
def test_get_model_from_list_ambiguous(self):
client = self.tables_client(
{
"list_models.return_value": [
mock.Mock(display_name="my_model"),
mock.Mock(display_name="not_my_model"),
mock.Mock(display_name="my_model"),
]
},
{},
)
with pytest.raises(ValueError):
client.get_model(model_display_name="my_model")
def test_delete_model(self):
model = mock.Mock()
model.configure_mock(name="name")
client = self.tables_client({"delete_model.return_value": None}, {})
client.delete_model(model=model)
client.auto_ml_client.delete_model.assert_called_with(
request=automl_v1beta1.DeleteModelRequest(name="name")
)
def test_delete_model_not_found(self):
client = self.tables_client({"list_models.return_value": []}, {})
client.delete_model(model_display_name="not_found")
client.auto_ml_client.delete_model.assert_not_called()
def test_delete_model_name(self):
client = self.tables_client({"delete_model.return_value": None}, {})
client.delete_model(model_name="name")
client.auto_ml_client.delete_model.assert_called_with(
request=automl_v1beta1.DeleteModelRequest(name="name")
)
def test_deploy_model_no_args(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.deploy_model()
client.auto_ml_client.deploy_model.assert_not_called()
def test_deploy_model(self):
client = self.tables_client({}, {})
client.deploy_model(model_name="name")
client.auto_ml_client.deploy_model.assert_called_with(
request=automl_v1beta1.DeployModelRequest(name="name")
)
def test_deploy_model_not_found(self):
client = self.tables_client({"list_models.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.deploy_model(model_display_name="name")
client.auto_ml_client.deploy_model.assert_not_called()
def test_undeploy_model(self):
client = self.tables_client({}, {})
client.undeploy_model(model_name="name")
client.auto_ml_client.undeploy_model.assert_called_with(
request=automl_v1beta1.UndeployModelRequest(name="name")
)
def test_undeploy_model_not_found(self):
client = self.tables_client({"list_models.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.undeploy_model(model_display_name="name")
client.auto_ml_client.undeploy_model.assert_not_called()
def test_create_model(self):
table_spec_mock = mock.Mock()
        # "name" is reserved by mock.Mock.__init__, so it must be set via configure_mock
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/2", display_name="column")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
"location_path.return_value": LOCATION_PATH,
},
{},
)
client.create_model(
"my_model", dataset_name="my_dataset", train_budget_milli_node_hours=1000
)
client.auto_ml_client.create_model.assert_called_with(
request=automl_v1beta1.CreateModelRequest(
parent=LOCATION_PATH,
model={
"display_name": "my_model",
"dataset_id": "my_dataset",
"tables_model_metadata": {"train_budget_milli_node_hours": 1000},
},
)
)
def test_create_model_include_columns(self):
table_spec_mock = mock.Mock()
        # "name" is reserved by mock.Mock.__init__, so it must be set via configure_mock
table_spec_mock.configure_mock(name="table")
column_spec_1 = automl_v1beta1.ColumnSpec(
name="column/1", display_name="column1"
)
column_spec_2 = automl_v1beta1.ColumnSpec(
name="column/2", display_name="column2"
)
client = self.tables_client(
client_attrs={
"list_table_specs.return_value": [
automl_v1beta1.TableSpec(name="table")
],
"list_column_specs.return_value": [column_spec_1, column_spec_2],
"location_path.return_value": LOCATION_PATH,
},
prediction_client_attrs={},
)
client.create_model(
"my_model",
dataset_name="my_dataset",
include_column_spec_names=["column1"],
train_budget_milli_node_hours=1000,
)
client.auto_ml_client.create_model.assert_called_with(
request=automl_v1beta1.CreateModelRequest(
parent=LOCATION_PATH,
model=automl_v1beta1.Model(
display_name="my_model",
dataset_id="my_dataset",
tables_model_metadata=automl_v1beta1.TablesModelMetadata(
train_budget_milli_node_hours=1000,
input_feature_column_specs=[column_spec_1],
),
),
)
)
def test_create_model_exclude_columns(self):
table_spec_mock = mock.Mock()
        # "name" is reserved by mock.Mock.__init__, so it must be set via configure_mock
table_spec_mock.configure_mock(name="table")
column_spec_1 = automl_v1beta1.ColumnSpec(
name="column/1", display_name="column1"
)
column_spec_2 = automl_v1beta1.ColumnSpec(
name="column/2", display_name="column2"
)
client = self.tables_client(
client_attrs={
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_1, column_spec_2],
"location_path.return_value": LOCATION_PATH,
},
prediction_client_attrs={},
)
client.create_model(
"my_model",
dataset_name="my_dataset",
exclude_column_spec_names=["column1"],
train_budget_milli_node_hours=1000,
)
client.auto_ml_client.create_model.assert_called_with(
request=automl_v1beta1.CreateModelRequest(
parent=LOCATION_PATH,
model=automl_v1beta1.Model(
display_name="my_model",
dataset_id="my_dataset",
tables_model_metadata=automl_v1beta1.TablesModelMetadata(
train_budget_milli_node_hours=1000,
input_feature_column_specs=[column_spec_2],
),
),
)
)
def test_create_model_invalid_hours_small(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.create_model(
"my_model", dataset_name="my_dataset", train_budget_milli_node_hours=1
)
client.auto_ml_client.create_model.assert_not_called()
def test_create_model_invalid_hours_large(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.create_model(
"my_model",
dataset_name="my_dataset",
train_budget_milli_node_hours=1000000,
)
client.auto_ml_client.create_model.assert_not_called()
def test_create_model_invalid_no_dataset(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.create_model("my_model", train_budget_milli_node_hours=1000)
client.auto_ml_client.get_dataset.assert_not_called()
client.auto_ml_client.create_model.assert_not_called()
def test_create_model_invalid_include_exclude(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.create_model(
"my_model",
dataset_name="my_dataset",
include_column_spec_names=["a"],
exclude_column_spec_names=["b"],
train_budget_milli_node_hours=1000,
)
client.auto_ml_client.get_dataset.assert_not_called()
client.auto_ml_client.create_model.assert_not_called()
def test_predict_from_array(self):
data_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY)
column_spec = mock.Mock(display_name="a", data_type=data_type)
model_metadata = mock.Mock(input_feature_column_specs=[column_spec])
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
client.predict(["1"], model_name="my_model")
        # append each row value separately until the issue below is resolved
# https://github.com/googleapis/proto-plus-python/issues/104
row = data_items.Row()
row.values.append(struct.Value(string_value="1"))
payload = data_items.ExamplePayload(row=row)
client.prediction_client.predict.assert_called_with(
request=automl_v1beta1.PredictRequest(
name="my_model", payload=payload, params=None
)
)
def test_predict_from_dict(self):
data_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY)
column_spec_a = mock.Mock(display_name="a", data_type=data_type)
column_spec_b = mock.Mock(display_name="b", data_type=data_type)
model_metadata = mock.Mock(
input_feature_column_specs=[column_spec_a, column_spec_b]
)
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
client.predict({"a": "1", "b": "2"}, model_name="my_model")
        # append each row value separately until the issue below is resolved
# https://github.com/googleapis/proto-plus-python/issues/104
row = data_items.Row()
row.values.append(struct.Value(string_value="1"))
row.values.append(struct.Value(string_value="2"))
payload = data_items.ExamplePayload(row=row)
client.prediction_client.predict.assert_called_with(
request=automl_v1beta1.PredictRequest(
name="my_model", payload=payload, params=None
)
)
def test_predict_from_dict_with_feature_importance(self):
data_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY)
column_spec_a = mock.Mock(display_name="a", data_type=data_type)
column_spec_b = mock.Mock(display_name="b", data_type=data_type)
model_metadata = mock.Mock(
input_feature_column_specs=[column_spec_a, column_spec_b]
)
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
client.predict(
{"a": "1", "b": "2"}, model_name="my_model", feature_importance=True
)
        # append each row value separately until the issue below is resolved
# https://github.com/googleapis/proto-plus-python/issues/104
row = data_items.Row()
row.values.append(struct.Value(string_value="1"))
row.values.append(struct.Value(string_value="2"))
payload = data_items.ExamplePayload(row=row)
client.prediction_client.predict.assert_called_with(
request=automl_v1beta1.PredictRequest(
name="my_model", payload=payload, params={"feature_importance": "true"}
)
)
def test_predict_from_dict_missing(self):
data_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY)
column_spec_a = mock.Mock(display_name="a", data_type=data_type)
column_spec_b = mock.Mock(display_name="b", data_type=data_type)
model_metadata = mock.Mock(
input_feature_column_specs=[column_spec_a, column_spec_b]
)
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
client.predict({"a": "1"}, model_name="my_model")
        # append each row value separately until the issue below is resolved
# https://github.com/googleapis/proto-plus-python/issues/104
row = data_items.Row()
row.values.append(struct.Value(string_value="1"))
row.values.append(struct.Value(null_value=struct.NullValue.NULL_VALUE))
payload = data_items.ExamplePayload(row=row)
client.prediction_client.predict.assert_called_with(
request=automl_v1beta1.PredictRequest(
name="my_model", payload=payload, params=None
)
)
def test_predict_all_types(self):
float_type = mock.Mock(type_code=data_types.TypeCode.FLOAT64)
timestamp_type = mock.Mock(type_code=data_types.TypeCode.TIMESTAMP)
string_type = mock.Mock(type_code=data_types.TypeCode.STRING)
array_type = mock.Mock(
type_code=data_types.TypeCode.ARRAY,
list_element_type=mock.Mock(type_code=data_types.TypeCode.FLOAT64),
)
struct_type = mock.Mock(
type_code=data_types.TypeCode.STRUCT,
struct_type=data_types.StructType(
fields={
"a": data_types.DataType(type_code=data_types.TypeCode.CATEGORY),
"b": data_types.DataType(type_code=data_types.TypeCode.CATEGORY),
}
),
)
category_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY)
column_spec_float = mock.Mock(display_name="float", data_type=float_type)
column_spec_timestamp = mock.Mock(
display_name="timestamp", data_type=timestamp_type
)
column_spec_string = mock.Mock(display_name="string", data_type=string_type)
column_spec_array = mock.Mock(display_name="array", data_type=array_type)
column_spec_struct = mock.Mock(display_name="struct", data_type=struct_type)
column_spec_category = mock.Mock(
display_name="category", data_type=category_type
)
column_spec_null = mock.Mock(display_name="null", data_type=category_type)
model_metadata = mock.Mock(
input_feature_column_specs=[
column_spec_float,
column_spec_timestamp,
column_spec_string,
column_spec_array,
column_spec_struct,
column_spec_category,
column_spec_null,
]
)
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
client.predict(
{
"float": 1.0,
"timestamp": "EST",
"string": "text",
"array": [1],
"struct": {"a": "label_a", "b": "label_b"},
"category": "a",
"null": None,
},
model_name="my_model",
)
struct_pb = struct.Struct()
struct_pb.fields["a"].CopyFrom(struct.Value(string_value="label_a"))
struct_pb.fields["b"].CopyFrom(struct.Value(string_value="label_b"))
        # append each row value separately until the issue below is resolved
# https://github.com/googleapis/proto-plus-python/issues/104
row = data_items.Row()
values = [
struct.Value(number_value=1.0),
struct.Value(string_value="EST"),
struct.Value(string_value="text"),
struct.Value(
list_value=struct.ListValue(values=[struct.Value(number_value=1.0)])
),
struct.Value(struct_value=struct_pb),
struct.Value(string_value="a"),
struct.Value(null_value=struct.NullValue.NULL_VALUE),
]
for v in values:
row.values.append(v)
payload = data_items.ExamplePayload(row=row)
client.prediction_client.predict.assert_called_with(
request=automl_v1beta1.PredictRequest(
name="my_model", payload=payload, params=None
)
)
def test_predict_from_array_missing(self):
data_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY)
column_spec = mock.Mock(display_name="a", data_type=data_type)
model_metadata = mock.Mock(input_feature_column_specs=[column_spec])
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
with pytest.raises(ValueError):
client.predict([], model_name="my_model")
client.prediction_client.predict.assert_not_called()
def test_batch_predict_pandas_dataframe(self):
client = self.tables_client(
gcs_client_attrs={
"bucket_name": "my_bucket",
"upload_pandas_dataframe.return_value": "gs://input",
}
)
dataframe = pandas.DataFrame({})
client.batch_predict(
project=PROJECT,
region=REGION,
model_name="my_model",
pandas_dataframe=dataframe,
gcs_output_uri_prefix="gs://output",
)
client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
client.prediction_client.batch_predict.assert_called_with(
request=automl_v1beta1.BatchPredictRequest(
name="my_model",
input_config={"gcs_source": {"input_uris": ["gs://input"]}},
output_config={"gcs_destination": {"output_uri_prefix": "gs://output"}},
)
)
def test_batch_predict_pandas_dataframe_init_gcs(self):
client = automl_v1beta1.TablesClient(
client=mock.Mock(),
prediction_client=mock.Mock(),
project=PROJECT,
region=REGION,
credentials=AnonymousCredentials(),
)
dataframe = pandas.DataFrame({})
patch = mock.patch(
"google.cloud.automl_v1beta1.services.tables.gcs_client.GcsClient",
bucket_name="my_bucket",
)
with patch as MockGcsClient:
mockInstance = MockGcsClient.return_value
mockInstance.upload_pandas_dataframe.return_value = "gs://input"
dataframe = pandas.DataFrame({})
client.batch_predict(
model_name="my_model",
pandas_dataframe=dataframe,
gcs_output_uri_prefix="gs://output",
)
client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
client.prediction_client.batch_predict.assert_called_with(
request=automl_v1beta1.BatchPredictRequest(
name="my_model",
input_config={"gcs_source": {"input_uris": ["gs://input"]}},
output_config={
"gcs_destination": {"output_uri_prefix": "gs://output"}
},
)
)
def test_batch_predict_gcs(self):
client = self.tables_client({}, {})
client.batch_predict(
model_name="my_model",
gcs_input_uris="gs://input",
gcs_output_uri_prefix="gs://output",
)
client.prediction_client.batch_predict.assert_called_with(
request=automl_v1beta1.BatchPredictRequest(
name="my_model",
input_config={"gcs_source": {"input_uris": ["gs://input"]}},
output_config={"gcs_destination": {"output_uri_prefix": "gs://output"}},
)
)
def test_batch_predict_bigquery(self):
client = self.tables_client({}, {})
client.batch_predict(
model_name="my_model",
bigquery_input_uri="bq://input",
bigquery_output_uri="bq://output",
)
client.prediction_client.batch_predict.assert_called_with(
request=automl_v1beta1.BatchPredictRequest(
name="my_model",
input_config={"bigquery_source": {"input_uri": "bq://input"}},
output_config={"bigquery_destination": {"output_uri": "bq://output"}},
)
)
def test_batch_predict_bigquery_with_params(self):
client = self.tables_client({}, {})
client.batch_predict(
model_name="my_model",
bigquery_input_uri="bq://input",
bigquery_output_uri="bq://output",
params={"feature_importance": "true"},
)
client.prediction_client.batch_predict.assert_called_with(
request=automl_v1beta1.BatchPredictRequest(
name="my_model",
input_config={"bigquery_source": {"input_uri": "bq://input"}},
output_config={"bigquery_destination": {"output_uri": "bq://output"}},
params={"feature_importance": "true"},
)
)
def test_batch_predict_mixed(self):
client = self.tables_client({}, {})
client.batch_predict(
model_name="my_model",
gcs_input_uris="gs://input",
bigquery_output_uri="bq://output",
)
client.prediction_client.batch_predict.assert_called_with(
request=automl_v1beta1.BatchPredictRequest(
name="my_model",
input_config={"gcs_source": {"input_uris": ["gs://input"]}},
output_config={"bigquery_destination": {"output_uri": "bq://output"}},
)
)
def test_batch_predict_missing_input_gcs_uri(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.batch_predict(
model_name="my_model",
gcs_input_uris=None,
gcs_output_uri_prefix="gs://output",
)
client.prediction_client.batch_predict.assert_not_called()
def test_batch_predict_missing_input_bigquery_uri(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.batch_predict(
model_name="my_model",
bigquery_input_uri=None,
gcs_output_uri_prefix="gs://output",
)
client.prediction_client.batch_predict.assert_not_called()
def test_batch_predict_missing_output_gcs_uri(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.batch_predict(
model_name="my_model",
gcs_input_uris="gs://input",
gcs_output_uri_prefix=None,
)
client.prediction_client.batch_predict.assert_not_called()
def test_batch_predict_missing_output_bigquery_uri(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.batch_predict(
model_name="my_model",
gcs_input_uris="gs://input",
bigquery_output_uri=None,
)
client.prediction_client.batch_predict.assert_not_called()
def test_batch_predict_missing_model(self):
client = self.tables_client({"list_models.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.batch_predict(
model_display_name="my_model",
gcs_input_uris="gs://input",
gcs_output_uri_prefix="gs://output",
)
client.prediction_client.batch_predict.assert_not_called()
def test_batch_predict_no_model(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.batch_predict(
gcs_input_uris="gs://input", gcs_output_uri_prefix="gs://output"
)
client.auto_ml_client.list_models.assert_not_called()
client.prediction_client.batch_predict.assert_not_called()
def test_auto_ml_client_credentials(self):
credentials_mock = mock.Mock()
patch_auto_ml_client = mock.patch(
"google.cloud.automl_v1beta1.services.tables.tables_client.AutoMlClient"
)
with patch_auto_ml_client as MockAutoMlClient:
automl_v1beta1.TablesClient(credentials=credentials_mock)
_, auto_ml_client_kwargs = MockAutoMlClient.call_args
assert "credentials" in auto_ml_client_kwargs
assert auto_ml_client_kwargs["credentials"] == credentials_mock
def test_prediction_client_credentials(self):
credentials_mock = mock.Mock()
patch_prediction_client = mock.patch(
"google.cloud.automl_v1beta1.services.tables.tables_client.PredictionServiceClient"
)
with patch_prediction_client as MockPredictionClient:
automl_v1beta1.TablesClient(credentials=credentials_mock)
_, prediction_client_kwargs = MockPredictionClient.call_args
assert "credentials" in prediction_client_kwargs
assert prediction_client_kwargs["credentials"] == credentials_mock
def test_prediction_client_client_info(self):
client_info_mock = mock.Mock()
patch_prediction_client = mock.patch(
"google.cloud.automl_v1beta1.services.tables.tables_client.PredictionServiceClient"
)
with patch_prediction_client as MockPredictionClient:
automl_v1beta1.TablesClient(client_info=client_info_mock)
_, prediction_client_kwargs = MockPredictionClient.call_args
assert "client_info" in prediction_client_kwargs
assert prediction_client_kwargs["client_info"] == client_info_mock
| apache-2.0 |
tesidroni/mp | Lib/site-packages/numpy/lib/recfunctions.py | 58 | 34495 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
"""
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
_check_fill_value = np.ma.core._check_fill_value
__all__ = ['append_fields',
'drop_fields',
'find_duplicates',
'get_fieldstructure',
'join_by',
'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields',
'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
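# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of what zip_descr does, assuming two small structured
# arrays; exact type strings ('<i4' vs '<i8') depend on the platform:
#
#     a = np.array([(1, 2.0)], dtype=[('a', int), ('b', float)])
#     b = np.array([(3,)], dtype=[('c', int)])
#     zip_descr((a, b), flatten=True)
#     # -> roughly [('a', '<i8'), ('b', '<f8'), ('c', '<i8')]
#
# With flatten=False, the multi-field input keeps its nested description.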
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields as keys and a list of parent fields as values.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
# if (lastparent[-1] != lastname):
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if hasattr(element, '__iter__') and not isinstance(element, basestring):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
    seqarrays : sequence of arrays
        Sequence of arrays.
    fill_value : {None, integer}
        Value used to pad shorter iterables.
    flatten : {True, False}, optional
        Whether to collapse nested fields into a flat sequence.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in itertools.izip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
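# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of izip_records, assuming two flexible-type arrays of
# different lengths; the shorter input is padded with fill_value:
#
#     a = np.array([(1, 10.)], dtype=[('A', int), ('B', float)])
#     b = np.array([(2,), (3,)], dtype=[('C', int)])
#     list(izip_records((a, b), fill_value=-1, flatten=True))
#     # -> roughly [(1, 10.0, 2), (-1, 3)]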
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).iteritems():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays,
fill_value= -1, flatten=False, usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
    # Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = map(np.asanyarray, seqarrays)
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the fields
to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : string or sequence
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype (`asrecarray=False`)
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
#
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
#
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
#
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append((newname,
_recursive_rename_fields(current, namemapper)))
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data=None, dtypes=None,
fill_value= -1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
err_msg = "The number of arrays does not match the number of names"
raise ValueError(err_msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
elif not hasattr(dtypes, '__iter__'):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, "\
"a single dtype or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
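# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of append_fields, assuming a one-field base array; the
# integer width in the resulting dtype depends on the platform:
#
#     a = np.array([(1,), (2,)], dtype=[('a', int)])
#     append_fields(a, 'b', data=[10., 20.], usemask=False)
#     # -> roughly array([(1, 10.0), (2, 20.0)],
#     #                  dtype=[('a', '<i8'), ('b', '<f8')])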
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field.
Parameters
----------
    arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" % \
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
to the fields used to join the array.
An exception is raised if the `key` field cannot be found in the two input
arrays.
Neither `r1` nor `r2` should have any duplicates along `key`: the presence
of duplicates will make the output quite unreliable. Note that duplicates
are not looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of r1
        not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1 not
in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present in r2
        but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present in r1
        but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for the
two arrays and concatenating the result. This array is then sorted, and
the common entries selected. The output is constructed by filling the fields
with the selected entries. Matching is not preserved if there are some
duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError("The 'jointype' argument should be in 'inner', "\
"'outer' or 'leftouter' (got '%s' instead)" % jointype)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
(nb1, nb2) = (len(r1), len(r2))
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = names.index(name)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
            names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields : r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names:
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names:
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
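# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of join_by, assuming two structured arrays that share a
# 'key' field; an inner join keeps only the records common to both inputs:
#
#     a = np.array([(0, 10.), (1, 11.)], dtype=[('key', int), ('x', float)])
#     b = np.array([(1, 21.), (2, 22.)], dtype=[('key', int), ('y', float)])
#     join_by('key', a, b, jointype='inner', usemask=False)
#     # -> roughly array([(1, 11.0, 21.0)],
#     #                  dtype=[('key', '<i8'), ('x', '<f8'), ('y', '<f8')])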
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| gpl-3.0 |
ky822/Data_Bootcamp | Code/Python/bootcamp_sandbox.py | 1 | 4395 | """
Miscellaneous experiments for Data Bootcamp course.
Repository of materials (including this file):
* https://github.com/DaveBackus/Data_Bootcamp/
* https://github.com/DaveBackus/Data_Bootcamp/Code/Python
Written by Dave Backus, March 2015
Created with Python 3.4
"""
print('\nWelcome to Data Bootcamp!')
import datetime as dt
print('Today is', dt.date.today())
"""
Check Python version
"""
import sys
print('\nWhat version of Python are we running? \n', sys.version, '\n', sep='')
if float(sys.version_info[0]) < 3.0:
raise Exception('Program halted, old version of Python. ' +
'Sorry, you need to install Anaconda again.')
else:
print('Congratulations, Python is up to date!')
# sys.exit(0) # this halts execution
#%%
"""
Assignments and copies
http://stackoverflow.com/questions/10844493/dataframe-apply-in-python-pandas-alters-both-original-and-duplicate-dataframes
"""
# check 1
a = [1,2,3]
b = a
b[0] = 'WHOA!'
print('\nAfter assignment, a is', a)
# to make a copy
a = [1,2,3]
b = a.copy()
b[0] = 'WHOA!'
print('\nAfter copy, a is', a)
# check 2
import numpy as np
c = np.array([7, 3, 5])
d = c
e = 2*c - 5
print('\nAfter assignment, (d, e) are', d, e)
c[0] = 10
print(d, e)
#%%
"""
Check path of current working directory (just for the heck of it)
"""
# https://docs.python.org/2/library/os.path.html
import os
print('\nCurrent path:\n', os.getcwd(), sep='')
"""
Check for specific file
"""
import os
print('\nList of files in working directory:')
[print(file) for file in os.listdir()]
file = 'SQL_support_code.py'
if not os.path.isfile(file):
raise Exception('***** Program halted, file missing *****')
#%%
"""
IMF's historical database on public debt
https://www.imf.org/External/pubs/cat/longres.aspx?sk=24332.0
rows are countries, columns are dates (1692-2012)
"""
import pandas as pd
import urllib # handles internet files
import zipfile # handles zip files
import os
# copy zip file to hard drive
print('\nCopy IMF historical debt data to hard drive')
url = 'https://www.imf.org/external/pubs/ft/wp/2010/Data/wp10245.zip'
zname = '../Temp/' + os.path.basename(url) # strip out file name
urllib.request.urlretrieve(url, zname) # copy file from url to disk
# extract spreadsheet
zf = zipfile.ZipFile(zname, 'r')
zf.printdir()
xlsname = zf.namelist()[0]
xls = zf.extract(xlsname)
df = pd.read_excel(xls, sheetname=1, na_values=['…', '….', ''], index_col=0,
encoding='utf-8')
print('Type: ', type(df))
print('Shape (dimensions): ', df.shape)
print('Column labels (variables): ', df.columns.tolist())
print('Variable types: \n', df.dtypes, sep='')
#%%
# select years 1980 to 2013 and ifscode
years = [year for year in range(1980, 2013)]
years_str = [str(year) for year in years]
vars = ['ifscode'] + years
some = df[vars]
#%%
"""
# manual version
xlsfile = 'Debt Database Fall 2013 Vintage.xlsx'
df = pd.read_excel('../Temp/' + xlsfile, sheetname=1)
"""
# ok, some legit ways
#%%
"""
urllib version: the hard way relative to Pandas
"""
# copy file from url to hard drive
import urllib.request
file = 'foo.csv'
url1 = 'https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/'
url2 = 'Code/Data/test1.csv'
url = url1 + url2
urllib.request.urlretrieve(url, file)
# Sarah's version
f = urllib.request.urlopen(url)
file = 'foo_sbh.csv'
with open(file, 'wb') as local_file:
local_file.write(f.read())
#%%
"""
World Bank WDI from zip file
File is too big, takes too long to read (but has great stuff!)
"""
import pandas as pd
import urllib
import zipfile
import os
# this is a big file, best to test with something smaller
url = 'http://databank.worldbank.org/data/download/WDI_csv.zip'
file = '../Temp/' + os.path.basename(url) # strip out file name
urllib.request.urlretrieve(url, file) # copy to disk
# see what's there
print(['Is zipfile?', zipfile.is_zipfile(file)])
zf = zipfile.ZipFile(file, 'r')
#print('List of zipfile contents (two versions)')
zf.printdir()
# extract a component
csv = zf.extract('WDI_Data.csv') # copy to disk
df1 = pd.read_csv(csv) # read
print(df1.columns) # check contents
# alternative: open and read
csv = zf.open('WDI_Data.csv')
df2 = pd.read_csv(csv)
print(df2.columns)
| mit |
andrescodas/casadi | docs/examples/python/dae_multiple_shooting.py | 3 | 3595 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
"""
Solves the following optimal control problem (OCP) in differential-algebraic
equations (DAE)
minimize integral_{t=0}^{10} x0^2 + x1^2 + u^2 dt
x0,x1,z,u
subject to dot(x0) == z*x0-x1+u \
dot(x1) == x0 } for 0 <= t <= 10
0 == x1^2 + z - 1 /
x0(t=0) == 0
x1(t=0) == 1
x0(t=10) == 0
x1(t=10) == 0
-0.75 <= u <= 1 for 0 <= t <= 10
The method used is direct multiple shooting.
Joel Andersson, 2015
"""
# Declare variables
x0 = SX.sym('x0')
x1 = SX.sym('x1')
x = vertcat(x0, x1) # Differential states
z = SX.sym('z') # Algebraic variable
u = SX.sym('u') # Control
# Differential equation
f_x = vertcat(z*x0-x1+u, x0)
# Algebraic equation
f_z = x1**2 + z - 1
# Lagrange cost term (quadrature)
f_q = x0**2 + x1**2 + u**2
# Create an integrator
dae = {'x':x, 'z':z, 'p':u, 'ode':f_x, 'alg':f_z, 'quad':f_q}
opts = {'tf':0.5} # interval length
I = integrator('I', 'idas', dae, opts)
# Number of intervals
nk = 20
# Start with an empty NLP
w = [] # List of variables
lbw = [] # Lower bounds on w
ubw = [] # Upper bounds on w
G = [] # Constraints
J = 0 # Cost function
# Initial conditions
Xk = MX.sym('X0', 2)
w.append(Xk)
lbw += [ 0, 1 ]
ubw += [ 0, 1 ]
# Loop over all intervals
for k in range(nk):
# Local control
Uk = MX.sym('U'+str(k))
w.append(Uk)
lbw += [-0.75]
ubw += [ 1.00]
# Call integrator function
Ik = I(x0=Xk, p=Uk)
Xk = Ik['xf']
J = J + Ik['qf'] # Sum quadratures
# "Lift" the variable
X_prev = Xk
Xk = MX.sym('X'+str(k+1), 2)
w.append(Xk)
lbw += [-inf, -inf]
ubw += [ inf, inf]
G.append(X_prev - Xk)
# Allocate an NLP solver
nlp = {'x':vertcat(*w), 'f':J, 'g':vertcat(*G)}
opts = {'ipopt.linear_solver':'ma27'}
solver = nlpsol('solver', 'ipopt', nlp, opts)
# Pass bounds, initial guess and solve NLP
sol = solver(lbx = lbw, # Lower variable bound
ubx = ubw, # Upper variable bound
lbg = 0.0, # Lower constraint bound
ubg = 0.0, # Upper constraint bound
x0 = 0.0) # Initial guess
# Plot the results
import matplotlib.pyplot as plt
plt.figure(1)
plt.clf()
plt.plot(linspace(0., 10., nk+1), sol['x'][0::3],'--')
plt.plot(linspace(0., 10., nk+1), sol['x'][1::3],'-')
plt.plot(linspace(0., 10., nk), sol['x'][2::3],'-.')
plt.title('Van der Pol optimization - multiple shooting')
plt.xlabel('time')
plt.legend(['x0 trajectory','x1 trajectory','u trajectory'])
plt.grid()
plt.show()
| lgpl-3.0 |
shaneknapp/spark | python/pyspark/pandas/tests/test_typedef.py | 15 | 16852 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import unittest
import datetime
import decimal
from typing import List
import pandas
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
FloatType,
IntegerType,
LongType,
StringType,
StructField,
StructType,
ByteType,
ShortType,
DateType,
DecimalType,
DoubleType,
TimestampType,
)
from pyspark.pandas.typedef import (
as_spark_type,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
infer_return_type,
pandas_on_spark_type,
)
from pyspark import pandas as ps
class TypeHintTests(unittest.TestCase):
@unittest.skipIf(
sys.version_info < (3, 7),
"Type inference from pandas instances is supported with Python 3.7+",
)
def test_infer_schema_from_pandas_instances(self):
def func() -> pd.Series[int]:
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, np.int64)
self.assertEqual(inferred.spark_type, LongType())
def func() -> pd.Series[np.float]:
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, np.float64)
self.assertEqual(inferred.spark_type, DoubleType())
def func() -> "pd.DataFrame[np.float, str]":
pass
expected = StructType([StructField("c0", DoubleType()), StructField("c1", StringType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
self.assertEqual(inferred.spark_type, expected)
def func() -> "pandas.DataFrame[np.float]":
pass
expected = StructType([StructField("c0", DoubleType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64])
self.assertEqual(inferred.spark_type, expected)
def func() -> "pd.Series[int]":
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, np.int64)
self.assertEqual(inferred.spark_type, LongType())
def func() -> pd.DataFrame[np.float, str]:
pass
expected = StructType([StructField("c0", DoubleType()), StructField("c1", StringType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
self.assertEqual(inferred.spark_type, expected)
def func() -> pd.DataFrame[np.float]:
pass
expected = StructType([StructField("c0", DoubleType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
def func() -> pd.DataFrame[pdf.dtypes]: # type: ignore
pass
expected = StructType([StructField("c0", LongType()), StructField("c1", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": pd.Categorical(["a", "b", "c"])})
def func() -> pd.Series[pdf.b.dtype]: # type: ignore
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, CategoricalDtype(categories=["a", "b", "c"]))
self.assertEqual(inferred.spark_type, LongType())
def func() -> pd.DataFrame[pdf.dtypes]: # type: ignore
pass
expected = StructType([StructField("c0", LongType()), StructField("c1", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, CategoricalDtype(categories=["a", "b", "c"])])
self.assertEqual(inferred.spark_type, expected)
def test_if_pandas_implements_class_getitem(self):
# the current type hint implementation of pandas DataFrame assumes pandas doesn't
# implement '__class_getitem__'. This test case is to make sure pandas
# doesn't implement them.
assert not ps._frame_has_class_getitem
assert not ps._series_has_class_getitem
@unittest.skipIf(
sys.version_info < (3, 7),
"Type inference from pandas instances is supported with Python 3.7+",
)
def test_infer_schema_with_names_pandas_instances(self):
def func() -> 'pd.DataFrame["a" : np.float, "b":str]': # noqa: F405
pass
expected = StructType([StructField("a", DoubleType()), StructField("b", StringType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
self.assertEqual(inferred.spark_type, expected)
def func() -> "pd.DataFrame['a': np.float, 'b': int]": # noqa: F405
pass
expected = StructType([StructField("a", DoubleType()), StructField("b", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
pass
expected = StructType([StructField("a", LongType()), StructField("b", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({("x", "a"): [1, 2, 3], ("y", "b"): [3, 4, 5]})
def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
pass
expected = StructType(
[StructField("(x, a)", LongType()), StructField("(y, b)", LongType())]
)
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": pd.Categorical(["a", "b", "c"])})
def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
pass
expected = StructType([StructField("a", LongType()), StructField("b", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, CategoricalDtype(categories=["a", "b", "c"])])
self.assertEqual(inferred.spark_type, expected)
@unittest.skipIf(
sys.version_info < (3, 7),
"Type inference from pandas instances is supported with Python 3.7+",
)
def test_infer_schema_with_names_pandas_instances_negative(self):
def try_infer_return_type():
def f() -> 'pd.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
class A:
pass
def try_infer_return_type():
def f() -> pd.DataFrame[A]:
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> 'pd.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
# object type
pdf = pd.DataFrame({"a": ["a", 2, None]})
def try_infer_return_type():
def f() -> pd.DataFrame[pdf.dtypes]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> pd.Series[pdf.a.dtype]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def test_infer_schema_with_names_negative(self):
def try_infer_return_type():
def f() -> 'ps.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
class A:
pass
def try_infer_return_type():
def f() -> ps.DataFrame[A]:
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> 'ps.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
# object type
pdf = pd.DataFrame({"a": ["a", 2, None]})
def try_infer_return_type():
def f() -> ps.DataFrame[pdf.dtypes]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> ps.Series[pdf.a.dtype]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def test_as_spark_type_pandas_on_spark_dtype(self):
type_mapper = {
# binary
np.character: (np.character, BinaryType()),
np.bytes_: (np.bytes_, BinaryType()),
np.string_: (np.bytes_, BinaryType()),
bytes: (np.bytes_, BinaryType()),
# integer
np.int8: (np.int8, ByteType()),
np.byte: (np.int8, ByteType()),
np.int16: (np.int16, ShortType()),
np.int32: (np.int32, IntegerType()),
np.int64: (np.int64, LongType()),
np.int: (np.int64, LongType()),
int: (np.int64, LongType()),
# floating
np.float32: (np.float32, FloatType()),
np.float: (np.float64, DoubleType()),
np.float64: (np.float64, DoubleType()),
float: (np.float64, DoubleType()),
# string
np.str: (np.unicode_, StringType()),
np.unicode_: (np.unicode_, StringType()),
str: (np.unicode_, StringType()),
# bool
np.bool: (np.bool, BooleanType()),
bool: (np.bool, BooleanType()),
# datetime
np.datetime64: (np.datetime64, TimestampType()),
datetime.datetime: (np.dtype("datetime64[ns]"), TimestampType()),
# DateType
datetime.date: (np.dtype("object"), DateType()),
# DecimalType
decimal.Decimal: (np.dtype("object"), DecimalType(38, 18)),
# ArrayType
np.ndarray: (np.dtype("object"), ArrayType(StringType())),
List[bytes]: (np.dtype("object"), ArrayType(BinaryType())),
List[np.character]: (np.dtype("object"), ArrayType(BinaryType())),
List[np.bytes_]: (np.dtype("object"), ArrayType(BinaryType())),
List[np.string_]: (np.dtype("object"), ArrayType(BinaryType())),
List[bool]: (np.dtype("object"), ArrayType(BooleanType())),
List[np.bool]: (np.dtype("object"), ArrayType(BooleanType())),
List[datetime.date]: (np.dtype("object"), ArrayType(DateType())),
List[np.int8]: (np.dtype("object"), ArrayType(ByteType())),
List[np.byte]: (np.dtype("object"), ArrayType(ByteType())),
List[decimal.Decimal]: (np.dtype("object"), ArrayType(DecimalType(38, 18))),
List[float]: (np.dtype("object"), ArrayType(DoubleType())),
List[np.float]: (np.dtype("object"), ArrayType(DoubleType())),
List[np.float64]: (np.dtype("object"), ArrayType(DoubleType())),
List[np.float32]: (np.dtype("object"), ArrayType(FloatType())),
List[np.int32]: (np.dtype("object"), ArrayType(IntegerType())),
List[int]: (np.dtype("object"), ArrayType(LongType())),
List[np.int]: (np.dtype("object"), ArrayType(LongType())),
List[np.int64]: (np.dtype("object"), ArrayType(LongType())),
List[np.int16]: (np.dtype("object"), ArrayType(ShortType())),
List[str]: (np.dtype("object"), ArrayType(StringType())),
List[np.unicode_]: (np.dtype("object"), ArrayType(StringType())),
List[datetime.datetime]: (np.dtype("object"), ArrayType(TimestampType())),
List[np.datetime64]: (np.dtype("object"), ArrayType(TimestampType())),
# CategoricalDtype
CategoricalDtype(categories=["a", "b", "c"]): (
CategoricalDtype(categories=["a", "b", "c"]),
LongType(),
),
}
for numpy_or_python_type, (dtype, spark_type) in type_mapper.items():
self.assertEqual(as_spark_type(numpy_or_python_type), spark_type)
self.assertEqual(pandas_on_spark_type(numpy_or_python_type), (dtype, spark_type))
with self.assertRaisesRegex(TypeError, "Type uint64 was not understood."):
as_spark_type(np.dtype("uint64"))
with self.assertRaisesRegex(TypeError, "Type object was not understood."):
as_spark_type(np.dtype("object"))
with self.assertRaisesRegex(TypeError, "Type uint64 was not understood."):
pandas_on_spark_type(np.dtype("uint64"))
with self.assertRaisesRegex(TypeError, "Type object was not understood."):
pandas_on_spark_type(np.dtype("object"))
@unittest.skipIf(not extension_dtypes_available, "The pandas extension types are not available")
def test_as_spark_type_extension_dtypes(self):
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
type_mapper = {
Int8Dtype(): ByteType(),
Int16Dtype(): ShortType(),
Int32Dtype(): IntegerType(),
Int64Dtype(): LongType(),
}
for extension_dtype, spark_type in type_mapper.items():
self.assertEqual(as_spark_type(extension_dtype), spark_type)
self.assertEqual(pandas_on_spark_type(extension_dtype), (extension_dtype, spark_type))
@unittest.skipIf(
not extension_object_dtypes_available, "The pandas extension object types are not available"
)
def test_as_spark_type_extension_object_dtypes(self):
from pandas import BooleanDtype, StringDtype
type_mapper = {
BooleanDtype(): BooleanType(),
StringDtype(): StringType(),
}
for extension_dtype, spark_type in type_mapper.items():
self.assertEqual(as_spark_type(extension_dtype), spark_type)
self.assertEqual(pandas_on_spark_type(extension_dtype), (extension_dtype, spark_type))
@unittest.skipIf(
not extension_float_dtypes_available, "The pandas extension float types are not available"
)
def test_as_spark_type_extension_float_dtypes(self):
from pandas import Float32Dtype, Float64Dtype
type_mapper = {
Float32Dtype(): FloatType(),
Float64Dtype(): DoubleType(),
}
for extension_dtype, spark_type in type_mapper.items():
self.assertEqual(as_spark_type(extension_dtype), spark_type)
self.assertEqual(pandas_on_spark_type(extension_dtype), (extension_dtype, spark_type))
if __name__ == "__main__":
from pyspark.pandas.tests.test_typedef import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
Titan-C/scikit-learn | examples/decomposition/plot_pca_3d.py | 10 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# #############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
# #############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | pytorch/source/numpy/doc/creation.py | 24 | 5496 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to NumPy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic NumPy Array Creation
==============================
NumPy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: h5py
FITS: Astropy
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
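For example, reading an HDF5 dataset with h5py looks roughly like this
(illustrative only; 'example.h5' and 'mydata' are placeholder names): ::
>>> import h5py
>>> f = h5py.File('example.h5', 'r')
>>> arr = f['mydata'][...] # read the dataset into a numpy array
>>> f.close()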
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
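As a small illustrative sketch (the file name 'data.csv' is a placeholder;
loadtxt and genfromtxt are the usual numpy entry points): ::
>>> arr = np.loadtxt('data.csv', delimiter=',', skiprows=1)
>>> rec = np.genfromtxt('data.csv', delimiter=',', names=True)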
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
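A minimal sketch of such a raw round trip (illustrative only; note that the
dtype and shape are not stored in the file and must be known when reading): ::
>>> a = np.arange(5, dtype=np.float64)
>>> a.tofile('raw.bin') # headerless raw bytes
>>> b = np.fromfile('raw.bin', dtype=np.float64)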
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
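For instance (illustrative only): ::
>>> np.random.random((2, 2)) # array of uniform random values
>>> np.diag([1, 2, 3]) # diagonal matrix from a sequence
>>> np.eye(3) # 3x3 identity matrix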
"""
from __future__ import division, absolute_import, print_function
| mit |
kunduk/VarP | QCModule/variantCallStat.py | 1 | 18514 | #
# COPYRIGHT (C) 2017-2020 University of Maryland
#
"""
.. module:: variantCallStat
:platform: Unix, Windows, MacOSX
:synopsis: Computes variant call related statistics
.. moduleauthor:: Kunal Kundu ([email protected])
The module generates plots to illustrate -
* Ti/Tv ratio across samples
* Homozygous/Heterozygous ratio across samples
* Number of Common, Rare and Novel SNVs across samples
* Number of Common, Rare and Novel Indels across samples
* NUmber of no call sites and low quality sites across samples
Module Dependency:
- Tabix
- Numpy
- Matplotlib
- Varant
This module accepts only gVCF files.
It expects the gVCF to be Varant annotated.
"""
from tabix import Tabix
from matplotlib import pyplot as plt
import numpy as np
import os
from gcn.lib.varann.vartype.varant.annotator import SNPAnnotation
def load_bed_file(capfile):
'''Loads the given capture file'''
capcoord = []
s = open(capfile)
for line in s:
line = line.strip()
if not line:
continue
d = line.split('\t')
chrom, sp, ep, gene = d
capcoord.append((chrom, int(sp), int(ep)))
return capcoord
def _get_afrsamples(ethnicity_config):
'''Returns a list of African samples'''
afr_samples = []
flag = False
s = open(ethnicity_config)
for line in s:
line = line.strip()
if not line:
continue
if flag == True:
sid, e = line.split(':')
if e.strip() == 'African':
afr_samples.append(sid.strip())
if line.startswith('Following'):
flag = True
return afr_samples
def _get_control_afrsamples(control_ethnicity_config):
'''Returns a list of African samples'''
control_afr_samples = []
o = open(control_ethnicity_config)
for line in o:
line = line.strip()
if not line:
continue
d = line.split('\t')
if d[0] == 'sample':
continue
if d[2] == 'AFR':
control_afr_samples.append(d[0])
return control_afr_samples
def get_change_type(ref, alt):
'''Given the Reference and Alternate allele,
it returns the substitution type'''
if len(ref) != len(alt):
return 'NA'
if ref in ['A', 'G'] and alt in ['A', 'G']:
return 'ts'
elif ref in ['C', 'T'] and alt in ['C', 'T']:
return 'ts'
else:
return 'tv'
def get_vcfsamples(invcf):
'''Given a VCF file it returns samples in the VCF'''
for line in os.popen("zcat %s | head -2000 | grep '#CHROM'" % invcf):
line = line.strip()
h = line.split('\t')[:9]
vcfsamples = line.split('\t')[9:]
break
return vcfsamples
def get_varcnt_gp(invcf, antvcf, capcoord, samples, varcnt, vao):
'''Returns a dictionary where the keys are sampleid and values are a list
of length 11 whose the elements are the count of -
* Nocall
* VarLQ
* GTLQ
* REF allele
* HetALT
* HomALT
* Ts
* Tv
* Common Variants (Based on 1000 Genomes)
* Rare Variants (Based on 1000 Genomes)
* Novel Variants (Based on 1000 Genomes)
'''
vcfsamples = get_vcfsamples(invcf)
for sid in samples:
varcnt[sid] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # NoCall, VarLQ, GTLQ, REF, HetALT, HomALT, Ts, Tv, Common, Rare, Novel
vcfo = Tabix(invcf)
antvcfo = Tabix(antvcf)
for coord in capcoord:
try:
qdata = vcfo.query(*coord)
except:
qdata = None
if qdata is None:
continue
for rec in qdata:
filter = rec[6]
gts = rec[9:]
trans = vao.retrieve_genedef(rec[0], int(rec[1]), int(rec[1]))
if not trans: # indicating its an intergenic region
continue
for sid, gtinfo in zip(vcfsamples, gts):
if sid in samples:
if gtinfo == '.' or './.' in gtinfo: # No Call
varcnt[sid][0] += 1
elif filter != 'PASS':
varcnt[sid][1] += 1 # VarLQ
elif '0/0' in gtinfo:
varcnt[sid][3] += 1 # REF
else:
gt, gq = gtinfo.split(':')[:2]
gq = int(gq)
a1, a2 = gt.split('/')
a1, a2 = int(a1), int(a2)
ref, alt = rec[3], rec[4].split(',')[a2-1]
ct = get_change_type(ref, alt)
arec = antvcfo.query(rec[0], int(rec[1])-1, int(rec[1]))
com, rare, novel = False, False, False
iflag = False
for e in arec:
if int(rec[1]) == int(e[1]):
kgaf = 0.0
if 'KGDB' in e[7]:
kgaf = float(e[7].split('KGAF=')[1].split(';')[0].split(',')[a2 - 1])
exacaf = 0.0
if 'EXACDB' in e[7]:
exacaf = float(e[7].split('EXACAF=')[1].split(';')[0].split(',')[a2 - 1])
if kgaf == 0.0 and exacaf == 0.0:
novel = True
elif kgaf < 0.05 and exacaf < 0.05:
rare = True
else:
com = True
break
if gq < 30: # GTLQ
varcnt[sid][2] += 1
elif a1 != a2: # HetALT
varcnt[sid][4] += 1
if ct == 'ts':
varcnt[sid][6] += 1
elif ct == 'tv':
varcnt[sid][7] += 1
if com:
varcnt[sid][8] += 1
elif rare:
varcnt[sid][9] += 1
elif novel:
varcnt[sid][10] += 1
print sid, rec[0], rec[1], rec[3], rec[4]
elif a1 == a2: # HomALT
varcnt[sid][5] += 1
if ct == 'ts':
varcnt[sid][6] += 1
elif ct == 'tv':
varcnt[sid][7] += 1
if com:
varcnt[sid][8] += 1
elif rare:
varcnt[sid][9] += 1
elif novel:
varcnt[sid][10] += 1
print sid, rec[0], rec[1], rec[3], rec[4]
return varcnt
def plot_varcnt_by_type(varcnt, control_varcnt, samples_capv02, afr_samples, control_afr_samples, yl):
'''Plot the variant call statistics'''
control_commonlist = []
control_rarelist_afr = []
control_rarelist_nonafr = []
for sid, val in control_varcnt.items():
control_commonlist.append(val[4])
if sid in control_afr_samples:
control_rarelist_afr.append(val[5])
else:
control_rarelist_nonafr.append(val[5])
legiden = {}
for sid, val in varcnt.items():
c, r, n = val[8], val[9], val[10]
if sid in afr_samples:
clr = '#1A5CEA'
if sid in samples_capv02:
m = '^'
l = 'African;Capture v02'
else:
m = 'o'
l = 'African;Capture v01'
else:
clr = '#EA831A'
if sid in samples_capv02:
m = '^'
l = 'Non-African;Capture v02'
else:
m = 'o'
l = 'Non-African;Capture v01'
x1 = np.random.normal(0.5, 0.08, 1)
p = plt.scatter(x1, c, s=80, c=clr, alpha=0.8, marker=m, linewidths=0.5)
if l not in legiden:
legiden[l] = p
x2 = np.random.normal(2, 0.08, 1)
plt.scatter(x2, r, s=80, c=clr, alpha=0.8, marker=m, linewidths=0.5)
x3 = np.random.normal(3.8, 0.08, 1)
plt.scatter(x3, n, s=80, c=clr, alpha=0.8, marker=m, linewidths=0.5)
plt.boxplot([control_commonlist, control_rarelist_afr, control_rarelist_nonafr], positions=[1, 2.5, 2.8])
plt.xticks([0, 0.5, 1, 2, 2.5, 2.8, 3.8, 4.5], ['', 'HS', 'KGS','HS', 'KGS_AFR', 'KGS_NonAFR','HS', ''], rotation=50)
plt.ylabel(yl)
plt.ylim(0)
t2 = []
t1 = legiden.keys()
t1.sort()
for e in t1:
t2.append(legiden[e])
plt.grid(True)
plt.show()
def plot_lq_nc_plot(varcnt, samples_capv02, afr_samples):
'''Plot the No call sites and low quality variant count statistics'''
legiden = {}
for sid, val in varcnt.items():
if sid in afr_samples:
clr = '#1A5CEA'
if sid in samples_capv02:
m = '^'
l = 'African;Capture v02'
else:
m = 'o'
l = 'African;Capture v01'
else:
clr = '#EA831A'
if sid in samples_capv02:
m = '^'
l = 'Non-African;Capture v02'
else:
m = 'o'
l = 'Non-African;Capture v01'
x1 = np.random.normal(val[0], 0.08, 1)
p = plt.scatter(x1, val[1], s=80, c=clr, alpha=0.8, marker=m, linewidths=0.5)
if l not in legiden:
legiden[l] = p
plt.ylabel('# of No Call sites')
plt.xlabel('# of low quality sites (not PASS in gVCF)')
t2 = []
t1 = legiden.keys()
t1.sort()
for e in t1:
t2.append(legiden[e])
plt.grid(True)
plt.show()
def plot_ratio(varcnt, control_varcnt, samples_capv02, afr_samples, control_afr_samples):
'''Plots the Ti/Tv ratio and HetALT/HomALT ratio across samples'''
titv_ratio_list = []
hethom_ratio_list = []
sidlist = []
control_titv_ratio_list = []
control_hethom_ratio_list = []
for s, val in varcnt.items():
hethom_ratio = float(val[4]) / float(val[5])
titv_ratio = float(val[6]) / float(val[7])
hethom_ratio_list.append(hethom_ratio)
titv_ratio_list.append(titv_ratio)
sidlist.append(s)
for s, val in control_varcnt.items():
hethom_ratio = float(val[0]) / float(val[1])
titv_ratio = float(val[2]) / float(val[3])
control_hethom_ratio_list.append(hethom_ratio)
control_titv_ratio_list.append(titv_ratio)
legiden = {}
for sid, t, h in zip(sidlist, titv_ratio_list, hethom_ratio_list):
if sid in afr_samples:
clr = '#1A5CEA'
if sid in samples_capv02:
m = '^'
l = 'African;Capture v02'
else:
m = 'o'
l = 'African;Capture v01'
else:
clr = '#EA831A'
if sid in samples_capv02:
m = '^'
l = 'Non-African;Capture v02'
else:
m = 'o'
l = 'Non-African;Capture v01'
x1 = np.random.normal(0.5, 0.08, 1)
p = plt.scatter(x1, t, s=80, c=clr, alpha=0.8, marker=m, linewidths=0.5)
if l not in legiden:
legiden[l] = p
x2 = np.random.normal(2, 0.08, 1)
plt.scatter(x2, h, s=80, c=clr, alpha=0.8, marker=m, linewidths=0.5)
plt.boxplot([control_titv_ratio_list, control_hethom_ratio_list], positions=[1, 2.5])
plt.xticks([0, 0.5, 1, 2, 2.5, 3], ['', 'HS', 'KGS', 'HS', 'KGS', ''])
plt.ylabel('Ratio')
t2 = []
t1 = legiden.keys()
t1.sort()
for e in t1:
t2.append(legiden[e])
plt.grid(True)
plt.show()
def get_varcnt_control(controlvcf):
'''Loads variants from the control vcf file'''
control_varcnt = {}
vcfsamples = get_vcfsamples(controlvcf)
print '# of 1000 Genomes samples', len(vcfsamples)
for sid in vcfsamples:
control_varcnt[sid] = [0, 0, 0, 0, 0, 0, 0] # HET, HOM, Ts, Tv, Common, Rare, Novel
s = os.popen("zcat %s" % controlvcf)
for rec in s:
if rec[0] == '#' :
continue
rec = rec.strip()
rec = rec.split('\t')
for sid, gtinfo in zip(vcfsamples, rec[9:]):
if '.' in gtinfo or gtinfo in ['0/0', '0|0', '0']:
continue
gt = gtinfo
a1, a2 = gt.split('|')
a1, a2 = int(a1), int(a2)
ref, alt = rec[3], rec[4].split(',')[a2-1]
ct = get_change_type(ref, alt)
com, rare = False, False
af = float(rec[7].split(';AF=')[1].split(';')[0].split(',')[a2 - 1])
#print af
if af >= 0.05:
com = True
#elif af
else:
rare = True
if a1 != a2: # HetALT
control_varcnt[sid][0] += 1
if ct == 'ts':
control_varcnt[sid][2] += 1
elif ct == 'tv':
control_varcnt[sid][3] += 1
if com:
control_varcnt[sid][4] += 1
elif rare:
control_varcnt[sid][5] += 1
elif a1 == a2: # HomALT
control_varcnt[sid][1] += 1
if ct == 'ts':
control_varcnt[sid][2] += 1
elif ct == 'tv':
control_varcnt[sid][3] += 1
if com:
control_varcnt[sid][4] += 1
elif rare:
control_varcnt[sid][5] += 1
return control_varcnt
def main(snvvcf, indelvcf, snvantvcf, snvcontrolvcf, indelcontrolvcf, capv01, capv02, samples_capv01, samples_capv02, ethnicity_config, control_ethnicity_config):
vao = SNPAnnotation('REFGENE')
cap1coord = load_bed_file(capv01)
cap2coord = load_bed_file(capv02)
afr_samples = _get_afrsamples(ethnicity_config)
control_afr_samples = _get_control_afrsamples(control_ethnicity_config)
## FOR SNV ##
# Load the SNV Count data for all captures
snvvarcnt = {}
snvvarcnt = get_varcnt_gp(snvvcf, snvantvcf, cap1coord, samples_capv01, snvvarcnt, vao)
snvvarcnt = get_varcnt_gp(snvvcf, snvantvcf, cap2coord, samples_capv02, snvvarcnt, vao)
# Load the SNV variant Count data for Control
snv_control_varcnt = get_varcnt_control(snvcontrolvcf)
## FOR INDEL ##
# Load the INDEL Count data for all captures
antvcf = indelvcf
indelvarcnt = {}
indelvarcnt = get_varcnt_gp(indelvcf, antvcf, cap1coord, samples_capv01, indelvarcnt, vao)
indelvarcnt = get_varcnt_gp(indelvcf, antvcf, cap2coord, samples_capv02, indelvarcnt, vao)
# Load the INDEL variant Count data for Control
indel_control_varcnt = get_varcnt_control(indelcontrolvcf)
# Display the Loaded Variant Count data in csv format
print '******'
for s, val in indelvarcnt.items():
sum(val[:6])
if val[6] == 0 or val[7] == 0:
titv_ratio = '0.0'
else:
titv_ratio = str(float(val[6]) / float(val[7]))
print s, '-', ','.join([str(e) for e in val] + [titv_ratio])
print '******'
# Display the Loaded Variant Count data in csv format
print '########'
for s, val in snvvarcnt.items():
sum(val[:6])
if val[6] == 0 or val[7] == 0:
titv_ratio = '0.0'
else:
titv_ratio = str(float(val[6]) / float(val[7]))
print s, '-', ','.join([str(e) for e in val] + [titv_ratio])
print '########'
# Plot No call versus Low Call sites
plot_lq_nc_plot(snvvarcnt, samples_capv02, afr_samples)
#Plot the INDEL - Common, Rare, Novel variants
yl = '# of Indels'
plot_varcnt_by_type(indelvarcnt, indel_control_varcnt, samples_capv02, afr_samples, control_afr_samples, yl)
#Plot the SNV - Common, Rare, Novel variants
yl = '# of SNVs'
plot_varcnt_by_type(snvvarcnt, snv_control_varcnt, samples_capv02, afr_samples, control_afr_samples, yl)
# Plot Ts/Tv ratio and HET/HOM ratio
plot_ratio(snvvarcnt, snv_control_varcnt, samples_capv02, afr_samples, control_afr_samples)
if __name__ == '__main__':
snvvcf = '/4-Hopkins_clinical_panel_SNV_dataset_v2.vcf.gz'
indelvcf = '/4-Hopkins_clinical_panel_InDel_varant_refgene.vcf.gz'
snvcontrolvcf = '/control_variants_all.vcf.gz'
indelcontrolvcf = '/indel_control_variants_all.vcf.gz'
snvantvcf = '/4-Hopkins_clinical_panel_SNV_varant_refgene.vcf.gz'
indelantvcf = '/4-Hopkins_clinical_panel_InDel_varant_refgene.vcf.gz'
capv01 = '/4-Hopkins_clinical_panel_capture_v1b.bed'
capv02 = '/4-Hopkins_clinical_panel_capture_v2paper.bed'
samples_capv01 = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'P9', 'P10', 'P11', 'P13',
'P14', 'P15', 'P16', 'P17', 'P18', 'P19', 'P20', 'P23', 'P24', 'P25', 'P26',
'P28', 'P29', 'P31', 'P32', 'P33', 'P35', 'P36', 'P37', 'P38', 'P40',
'P41', 'P42', 'P43', 'P44', 'P45', 'P46', 'P48', 'P49', 'P51',
'P52', 'P53', 'P54', 'P56', 'P57', 'P58', 'P59', 'P60', 'P61', 'P62',
'P63', 'P64', 'P65', 'P66', 'P67', 'P68', 'P69', 'P70', 'P71', 'P72', 'P73',
'P74', 'P75', 'P76', 'P77', 'P78', 'P79', 'P80', 'P81', 'P82', 'P83', 'P84',
'P85', 'P86', 'P87', 'P88', 'P89', 'P90', 'P91', 'P92', 'P93', 'P94', 'P95',
'P96', 'P97', 'P98', 'P99', 'P100', 'P101', 'P102', 'P103', 'P104', 'P105', 'P106']
samples_capv02 = ['P12', 'P21', 'P22', 'P27', 'P30', 'P34', 'P39', 'P47', 'P50', 'P55']
ethnicity_config = '/4-Hopkins_clinical_panel_SNV_predicted_ethnicity_KK.tsv'
control_ethnicity_config = '/integrated_call_samples_v3.20130502.ALL.panel'
control_ped_file = '/20130606_g1k.ped'
main(snvvcf, indelvcf, snvantvcf, snvcontrolvcf, indelcontrolvcf, capv01, capv02, samples_capv01, samples_capv02, ethnicity_config, control_ethnicity_config)
| agpl-3.0 |
manashmndl/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
jz3707/tushare | setup.py | 21 | 2592 | from setuptools import setup, find_packages
import codecs
import os
import tushare
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
long_desc = """
TuShare
===============
.. image:: https://api.travis-ci.org/waditu/tushare.png?branch=master
:target: https://travis-ci.org/waditu/tushare
.. image:: https://badge.fury.io/py/tushare.png
:target: http://badge.fury.io/py/tushare
* easy to use as most of the data returned are pandas DataFrame objects
* can be easily saved as csv, excel or json files
* can be inserted into MySQL or Mongodb
Target Users
--------------
* financial market analyst of China
* learners of financial data analysis with pandas/NumPy
* people who are interested in China financial data
Installation
--------------
pip install tushare
Upgrade
---------------
pip install tushare --upgrade
Quick Start
--------------
::
import tushare as ts
ts.get_hist_data('600848')
return::
open high close low volume p_change ma5 \
date
2012-01-11 6.880 7.380 7.060 6.880 14129.96 2.62 7.060
2012-01-12 7.050 7.100 6.980 6.900 7895.19 -1.13 7.020
2012-01-13 6.950 7.000 6.700 6.690 6611.87 -4.01 6.913
2012-01-16 6.680 6.750 6.510 6.480 2941.63 -2.84 6.813
2012-01-17 6.660 6.880 6.860 6.460 8642.57 5.38 6.822
2012-01-18 7.000 7.300 6.890 6.880 13075.40 0.44 6.788
2012-01-19 6.690 6.950 6.890 6.680 6117.32 0.00 6.770
2012-01-20 6.870 7.080 7.010 6.870 6813.09 1.74 6.832
"""
setup(
name='tushare',
version=tushare.__version__,
description='A utility for crawling historical and Real-time Quotes data of China stocks',
# long_description=read("READM.rst"),
long_description = long_desc,
author='Jimmy Liu',
author_email='[email protected]',
license='BSD',
url='http://tushare.org',
keywords='china stock data',
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: BSD License'],
packages=['tushare','tushare.stock','tushare.data','tushare.util'],
package_data={'': ['*.csv']},
) | bsd-3-clause |
scikit-multilearn/scikit-multilearn | skmultilearn/embedding/skembeddings.py | 1 | 2792 | from __future__ import absolute_import
from sklearn.base import BaseEstimator
class SKLearnEmbedder(BaseEstimator):
"""Embed the label space using a scikit-compatible matrix-based embedder
Parameters
----------
embedder : sklearn.base.BaseEstimator
a clonable instance of a scikit-compatible embedder, will be automatically
put under :code:`self.embedder`, see .
pass_input_space : bool (default is False)
whether to take :code:`X` into consideration upon clustering,
use only if you know that the embedder can handle two
parameters for clustering, will be automatically
put under :code:`self.pass_input_space`.
Example code for using this embedder looks like this:
.. code-block:: python
from skmultilearn.embedding import SKLearnEmbedder, EmbeddingClassifier
from sklearn.manifold import SpectralEmbedding
from sklearn.ensemble import RandomForestRegressor
from skmultilearn.adapt import MLkNN
clf = EmbeddingClassifier(
SKLearnEmbedder(SpectralEmbedding(n_components = 10)),
RandomForestRegressor(n_estimators=10),
MLkNN(k=5)
)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
"""
def __init__(self, embedder=None, pass_input_space=False):
super(BaseEstimator, self).__init__()
self.embedder = embedder
self.pass_input_space = pass_input_space
def fit(self, X, y):
"""Fits the embedder to data
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
"""
self.embedder.fit(X, y)
def fit_transform(self, X, y):
"""Fit the embedder and transform the output space
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
X, y_embedded
results of the embedding, input and output space
"""
if self.pass_input_space:
result = self.embedder.fit_transform(X, y)
else:
result = self.embedder.fit_transform(y)
return X, result
| bsd-2-clause |
andrewcmyers/tensorflow | tensorflow/contrib/learn/python/learn/grid_search_test.py | 137 | 2035 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3)
grid_search = GridSearchCV(
classifier, {'hidden_units': [[5, 5], [10, 10]]},
scoring='accuracy',
fit_params={'steps': [50]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
test.main()
| apache-2.0 |
hugobowne/scikit-learn | examples/classification/plot_classifier_comparison.py | 36 | 5123 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
| bsd-3-clause |
wearpants/osf.io | scripts/analytics/email_invites.py | 55 | 1332 | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from utils import plot_dates, mkdirp
user_collection = database['user']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def analyze_email_invites():
invited = user_collection.find({'unclaimed_records': {'$ne': {}}})
dates_invited = [
user['date_registered']
for user in invited
]
if not dates_invited:
return
fig = plot_dates(dates_invited)
plt.title('email invitations ({} total)'.format(len(dates_invited)))
plt.savefig(os.path.join(FIG_PATH, 'email-invites.png'))
plt.close()
def analyze_email_confirmations():
confirmed = user_collection.find({
'unclaimed_records': {'$ne': {}},
'is_claimed': True,
})
dates_confirmed = [
user['date_confirmed']
for user in confirmed
]
if not dates_confirmed:
return
fig = plot_dates(dates_confirmed)
plt.title('confirmed email invitations ({} total)'.format(len(dates_confirmed)))
plt.savefig(os.path.join(FIG_PATH, 'email-invite-confirmations.png'))
plt.close()
def main():
analyze_email_invites()
analyze_email_confirmations()
if __name__ == '__main__':
main()
| apache-2.0 |
pprett/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 104 | 3139 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
Kalinova/Dyn_models | ADC_MCMC/ADC_MCMC_pw.py | 1 | 29632 | '''
#############################################################################
Acknowledgments to paper: Kalinova et al. 2016, MNRAS
"The inner mass distribution of late-type spiral galaxies from SAURON stellar kinematic maps".
Copyright (c) 2016, Veselina Kalinova, Dario Colombo, Erik Rosolowsky
University of Alberta
E-mails: [email protected], [email protected], [email protected]
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided
that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of the Astropy Team nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
############################################################################
NAME:
ADC_MCMC_pw
PURPOSE:
This procedure calculates the Markov Chain Monte Carlo (MCMC) circular
velocity using the thin disk approximation assumption in the
Axisymmetric Drift Correction approach (ADC; Binney & Tremaine 2008).
We use the velocity and velocity dispersion radial profiles of the galaxy, derived from its stellar kinematics via the
kinemetry routine (Krajnovic et al., 2006) and the surface brightness as provided by the Multi-Gaussian Expansion
parametrization method (MGE; Monnet,Bacon & Emsellem 1992).
We use the "EMCEE code" of Foreman-Mackey et al. 2013 (http://dan.iel.fm/emcee/current/),
an implementation of an affine invariant ensemble sampler for the MCMC method of parameter estimations.
Here, we fit a power-law function to the velocity profile of the galaxy.
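ILLUSTRATIVE SKETCH (not part of this routine's interface): the emcee burn-in /
posterior sampling pattern described above, written out for orientation only.
"lnprob" stands for a user-supplied log-posterior function and is an assumption
of this sketch, not something defined in this module.
    import emcee
    import numpy as np
    ndim = 4                                  # (Vinf, Rc, sig0, ksig)
    guess = np.array([Vinf_in, Rc_in, sig0_in, ksig_in])
    p0 = [guess + 1e-3*np.random.randn(ndim) for i in range(nwalks)]
    # lnprob: user-supplied log-posterior (assumed, not defined here)
    sampler = emcee.EnsembleSampler(nwalks, ndim, lnprob,
                                    args=(R, vobs, evobs, sobs, esobs),
                                    threads=threads)
    pos, lp, state = sampler.run_mcmc(p0, burn_steps)   # burn-in phase
    burn_flatchains = sampler.flatchain
    sampler.reset()
    sampler.run_mcmc(pos, steps)                        # posterior phase
    final_flatchains = sampler.flatchain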
CALLING SEQUENCE:
res = ADC_MOC_pw(gal, incl, Vinf_in, Rc_in,sig0_in,ksig_in,R,vobs,evobs,sobs,esobs,I0obs,spobs,nwalks, burn_steps, steps, threads)
INPUT PARAMETERS:
GAL: name of the galaxy
INCL: inclination
VINF_IN: initial guess for the asymptotic velocity parameter in the power-law fitting model to the velocity radial profile
RC_IN: initial guess for the core radius parameter in the power-law fitting model of the velocity radial profile
SIG0_IN: initial guess for the y-intercept of the linear fit of the velocity dispersion radial profile
KSIG_IN: initial guess for the slope of the linear fit of the velocity dispersion radial profile
R: radius of the velocity profile
VOBS: observed velocity radial profile (e.g., using kinemetry routine of Krajnovic et al., 2006)
EVOBS: error of VOBS
SOBS: observed velocity dispersion radial profile (e.g., using kinemetry routine of Krajnovic et al., 2006)
ESOBS: error of SOBS
I0OBS: vector of length N containing the peak surface brightness of the
MGE Gaussians describing the galaxy surface brightness in units of
Lsun/pc^2 (solar luminosities per parsec^2)
SPOBS: vector of length N containing the dispersion in arcseconds of
the MGE Gaussians describing the galaxy surface brightness.
NWALKS: Number of Goodman & Weare walkers, which should be equal to or
greater than twice the dimension, i.e. twice the number of fitted parameters
BURN_STEPS: Number of the steps for the burn-in process
STEPS: Number of the steps after burn-in process, i.e steps for the final chains of the parameters
THREADS: Number of threads to use for parallelization, where threads > 1 is for multiprocessing
RETURN:
TABLE: median values of the posterior distributions of the power-law ADC parameters
(Vinf, Rc, sig0, ksig and beta_z, plus a placeholder column kv = 0), together with their 75th and 25th percentile errors
BURN_CHAINS: (burn-in phase) A pointer to the Markov chain itself, where the shape of this array is (k, iterations, dim).
BURN_LNS: (burn-in phase) A pointer to the matrix of the value of lnprobfn (a function that takes a vector in the parameter space
as input and returns the natural logarithm of the posterior probability for that position) produced at each step
for each walker. The shape is (k, iterations).
BURN_FLATCHAINS: (burn-in phase) A shortcut for accessing burn-in chain flattened along the zeroth (walker) axis.
BURN_FLATLNS: (burn-in phase) A shortcut to return the equivalent of lnprobability but aligned to flatchain rather than chain.
FINAL_CHAINS: (posterior phase) A pointer to the Markov chain itself, where the shape of this array is (k, iterations, dim)
FINAL_LNS: (posterior phase) A pointer to the matrix of the value of lnprobfn (a function that takes a vector in the parameter space
as input and returns the natural logarithm of the posterior probability for that position) produced at each step
for each walker. The shape is (k, iterations).
FINAL_FLATCHAINS: (posterior phase) A shortcut for accessing the posterior chain flattened along the zeroth (walker) axis.
FINAL_FLATLNS: (posterior phase) A shortcut to return the equivalent of lnprobability but aligned to flatchain rather than chain.
CHI2V: the chi2 of the velocity fit, where chi2v = np.sum((vobs-vmod)**2/(evobs)**2)
CHI2S: the chi2 of the velocity dispersion fit, where chi2s = np.sum((sobs-smod)**2/(esobs)**2)
CHI2V_RED: reduced chi2 of the velocity profile fit, where chi2v_red = chi2v/(len(R) - 5)
CHI2S_RED: reduced chi2 of the velocity dispersion profile fit, where chi2s_red = chi2s/(len(R) - 5)
VMOD: the model fit of the velocity radial profile
SMOD: the model fit of the velocity dispersion profile
METHODS:
log_adc: sub-routine defining the log-probability (priors plus likelihood) of the power-law ADC model
runadc: sub-routine for running the ADC-MCMC analysis
make_fig: sub-routine for plotting the burn-in and final chains of the parameters, and the corner figure
make_fig_curves: sub-routine for plotting the observed and modelled velocity and velocity dispersion radial profiles, the azimuthal velocity profile,
the deprojected radial velocity dispersion profile, and the circular velocity curve of the galaxy, using the distributions of
the parameters and their best-fit values.
USAGE EXAMPLE:
A simple usage example "test_ADC_MCMC_PW.py" is given in the same directory.
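A schematic call is sketched below; the galaxy name and all numerical values
are placeholders, and the input arrays (R, vobs, evobs, sobs, esobs, I0obs,
spobs) are assumed to be prepared by the user from the kinemetry and MGE outputs:
    res = ADC_MOC_pw('NGC0001', incl=40., Vinf_in=180., Rc_in=5.,
                     sig0_in=90., ksig_in=-1., R=R, vobs=vobs, evobs=evobs,
                     sobs=sobs, esobs=esobs, I0obs=I0obs, spobs=spobs,
                     nwalks=30, burn_steps=100, steps=500, threads=1)
    res.make_fig(incl=40.)
    res.make_fig_curves(incl=40.)
Note that the output directories used by the module (data_output/Burn_in,
data_output/Chains, data_output/Tables, data_output/Vcirc and the
corresponding figures/ subdirectories) must exist beforehand.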
REQUIRED ROUTINES:
By D. Foreman-Mackey (http://dan.iel.fm/emcee/current/)
- emcee
- corner
By Adam Ginsburg ([email protected])
- readcol.py
Other third-party packages imported by this module: fish (progress bar), astropy, scipy, numpy, matplotlib
MODIFICATION HISTORY:
V1.0: Written and tested as part of the implementation of
ADC-MCMC method described in Kalinova et al. (2016).
Veselina Kalinova, Dario Colombo, Erik Rosolowsky;
University of Alberta, 2016
'''
import emcee
import math
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import scipy.stats as ss
import scipy.interpolate as interp
from astropy.table import Table
from readcol import readcol
import corner
from matplotlib.colors import LogNorm
import fish
from pdb import set_trace as stop
def log_adc(p, R, vobs, sobs, evobs, esobs, Vinf_in, Rc_in,sig0_in,ksig_in,incl):
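# Log-posterior of the power-law ADC model. The projected model profiles are
#   vmod(R) = Vinf * R * sin(i) / sqrt(Rc^2 + R^2)
#   smod(R) = (sig0 + ksig*R) * sqrt(1 - beta*cos(i)^2 + 0.5*(ar - 1)*sin(i)^2),
# with ar = Rc^2 / (Rc^2 + R^2). Flat priors bound all five parameters, and
# Gaussian priors keep Vinf, Rc, sig0 and ksig close to the input guesses;
# non-finite priors or a NaN posterior return -inf.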
Vinf, Rc, beta, sig0, ksig = p[0], p[1], p[2], p[3], p[4]
ar = Rc**2/(Rc**2+R**2)
sigr = sig0+ksig*R
vmod = (Vinf*R)*np.sin(math.pi/180*incl)/np.sqrt(Rc**2+R**2)
smod = np.sqrt(1 - beta*np.cos(math.pi/180*incl)**2 + 0.5*(ar-1)*np.sin(math.pi/180*incl)**2)*sigr
#print 'incl:', incl
priors = ss.uniform.logpdf(Vinf,loc=0,scale=400)+\
ss.uniform.logpdf(Rc,loc=0,scale=50)+\
ss.uniform.logpdf(beta,loc=-1.5,scale=2.5)+\
ss.uniform.logpdf(sig0,loc=0,scale=300)+\
ss.uniform.logpdf(ksig,loc=-5,scale=10)+\
ss.norm.logpdf(Vinf,loc=Vinf_in,scale=10)+\
ss.norm.logpdf(Rc,loc=Rc_in,scale=1)+\
ss.norm.logpdf(sig0,loc=sig0_in,scale=1)+\
ss.norm.logpdf(ksig,loc=ksig_in,scale=0.5)
if np.isfinite(priors) == False:
return -np.inf
p1 = (vmod-vobs)**2/evobs**2
p1 = np.nansum(p1)
p2 = (smod-sobs)**2/esobs**2
p2 = np.nansum(p2)
lp = - p1 - p2 + priors
if np.isnan(lp):
return -np.inf
return lp
def runadc(gal, incl, Vinf_in, Rc_in,sig0_in,ksig_in,R,vobs,evobs,sobs,esobs,I0obs,spobs,nwalks, burn_steps, steps, threads):
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&
# MCMC for Vobs and sigma_obs
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&
# Set the walkers
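# Walkers start in small Gaussian balls around the user-supplied initial
# guesses (Vinf_in, Rc_in, sig0_in, ksig_in); the anisotropy beta is drawn
# uniformly over [-1.5, 1].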
ndim, nwalkers = 5,nwalks
p0 = np.zeros((nwalkers,ndim))
p0[:,0] = np.random.randn(nwalkers)*10+Vinf_in
p0[:,1] = np.random.randn(nwalkers)*1+Rc_in
p0[:,2] = np.random.uniform(-1.5,1,nwalkers) #beta
p0[:,3] = np.random.randn(nwalkers)*1+sig0_in
p0[:,4] = np.random.randn(nwalkers)*0.5+ksig_in
# p0[:,0] = np.random.randn(nwalkers)*10+Vinf_in
# p0[:,1] = np.random.randn(nwalkers)*1+Rc_in
# p0[:,2] = np.random.randn(nwalkers)*1+ideg_in #incl
# p0[:,3] = np.random.uniform(-1,1,nwalkers) #beta
# p0[:,4] = np.random.randn(nwalkers)*1+sig0_in
# p0[:,5] = np.random.randn(nwalkers)*0.5+ksig_in
####### Call EMCEE code #######
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_adc,
args=[R,vobs,sobs,evobs,esobs, Vinf_in, Rc_in,sig0_in,ksig_in, incl], threads=threads)
# burn-in
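# The burn-in run lets the walkers forget their starting positions; its
# chains are stored for inspection and the sampler is reset before the
# final production run.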
#pos, prob, state = sampler.run_mcmc(p0, burn_steps)
####### Chain #######
#sampler.reset()
#pos,prob,state = sampler.run_mcmc(pos,steps)
#--------------------------------------------
print "%&%&%&%&%&%&%&%&%&%&%&%&%&"
print "Run MCMC analysis"
print "%&%&%&%&%&%&%&%&%&%&%&%&%&"
# result from the EMCEE code
print("Burn-in...")
#pos, prob, state = sampler.run_mcmc(p0, burn_steps)
peixe = fish.ProgressFish(total=burn_steps)
for j, results in enumerate(sampler.sample(p0, iterations=burn_steps)):
peixe.animate(amount=j+1)
pos = results[0]
burn_chains = sampler.chain.copy()
burn_lns = sampler.lnprobability.copy()
burn_flatchains = sampler.flatchain.copy()
burn_flatlns = sampler.flatlnprobability.copy()
####################################################
#... save the data in a file
np.savez('data_output/Burn_in/'+gal+'_burn_lnP', chain_JAM=sampler.chain, lnprobability_JAM=sampler.lnprobability)
#... save the data in a file
np.savez('data_output/Burn_in/'+gal+'_burn_flatlnP', flatchain_JAM=sampler.flatchain, flatlnprobability_JAM=sampler.flatlnprobability)
##################################################
# RUN again MCMC after burn-in
print("Running MCMC...")
sampler.reset()
#pos,prob,state = sampler.run_mcmc(pos, 10)
peixe = fish.ProgressFish(total=steps)
for j, results in enumerate(sampler.sample(pos, iterations=steps)):
peixe.animate(amount=j+1)
final_chains = sampler.chain
final_lns = sampler.lnprobability
final_flatchains = sampler.flatchain
final_flatlns = sampler.flatlnprobability
####################################################
#... save the data in a file
np.savez('data_output/Chains/'+gal+'_final_lnP', chain_JAM=sampler.chain, lnprobability_JAM=sampler.lnprobability)
#... save the data in a file
np.savez('data_output/Chains/'+gal+'_final_flatlnP', flatchain_JAM=sampler.flatchain, flatlnprobability_JAM=sampler.flatlnprobability)
####################################################
# make distributions of the parameters
#---------------------------------------------
Vinf_dist = final_flatchains[:,0]
Rc_dist = final_flatchains[:,1]
betaz_dist = final_flatchains[:,2]
sig0_dist = final_flatchains[:,3]
ksig_dist = final_flatchains[:,4]
Vinf_med = np.median(final_flatchains[:,0])
Rc_med = np.median(final_flatchains[:,1])
betaz_med = np.median(final_flatchains[:,2])
sig0_med = np.median(final_flatchains[:,3])
ksig_med = np.median(final_flatchains[:,4])
Vinf_plus = np.percentile(final_flatchains[:,0], 75)- np.median(final_flatchains[:,0])
Rc_plus = np.percentile(final_flatchains[:,1], 75)- np.median(final_flatchains[:,1])
betaz_plus = np.percentile(final_flatchains[:,2], 75)- np.median(final_flatchains[:,2])
sig0_plus = np.percentile(final_flatchains[:,3], 75)- np.median(final_flatchains[:,3])
ksig_plus = np.percentile(final_flatchains[:,4], 75)- np.median(final_flatchains[:,4])
Vinf_minus = np.median(final_flatchains[:,0]) - np.percentile(final_flatchains[:,0], 25)
Rc_minus = np.median(final_flatchains[:,1]) - np.percentile(final_flatchains[:,1], 25)
betaz_minus = np.median(final_flatchains[:,2]) - np.percentile(final_flatchains[:,2], 25)
sig0_minus = np.median(final_flatchains[:,3]) - np.percentile(final_flatchains[:,3], 25)
ksig_minus = np.median(final_flatchains[:,4]) - np.percentile(final_flatchains[:,4], 25)
#-------------------------------------------------
# Tables with all parameters
#-----------------------------------------------
# Array for the medians
medians = [Vinf_med, Rc_med, 0.0, sig0_med,ksig_med, betaz_med]
# Array for the upper percentiles
ups = [Vinf_plus, Rc_plus, 0.0, sig0_plus,ksig_plus, betaz_plus]
# Array for the lower percentiles
lws = [Vinf_minus, Rc_minus, 0.0, sig0_minus,ksig_minus,betaz_minus]
# make table
table = Table([medians, ups, lws], names = ('medians(Vinf, Rc, kv, sig0,ksig,betaz)','ups','lws'))
table.write("data_output/Tables/"+gal+".txt",format="ascii.tab",delimiter=",")
print 'Print table'
#-------------------------------------------------
print "%&%&%&%&%&%&%&%&%&%&%&%&%&"
print "Final best fit values"
print "%&%&%&%&%&%&%&%&%&%&%&%&%&"
print 'Vinf: ', np.median(final_flatchains[:,0]), \
'+', np.percentile(final_flatchains[:,0], 75) - np.median(final_flatchains[:,0]),\
'-', np.median(final_flatchains[:,0]) - np.percentile(final_flatchains[:,0], 25)
print 'Rc: ', np.median(final_flatchains[:,1]), \
'+', np.percentile(final_flatchains[:,1], 75) - np.median(final_flatchains[:,1]),\
'-', np.median(final_flatchains[:,1]) - np.percentile(final_flatchains[:,1], 25)
print 'betaz: ', np.median(final_flatchains[:,2]), \
'+', np.percentile(final_flatchains[:,2], 75) - np.median(final_flatchains[:,2]),\
'-', np.median(final_flatchains[:,2]) - np.percentile(final_flatchains[:,2], 25)
print 'sig0: ', np.median(final_flatchains[:,3]), \
'+', np.percentile(final_flatchains[:,3], 75) - np.median(final_flatchains[:,3]),\
'-', np.median(final_flatchains[:,3]) - np.percentile(final_flatchains[:,3], 25)
print 'ksig: ', np.median(final_flatchains[:,4]), \
'+', np.percentile(final_flatchains[:,4], 75) - np.median(final_flatchains[:,4]),\
'-', np.median(final_flatchains[:,4]) - np.percentile(final_flatchains[:,4], 25)
#-------------------------------------------------
# Final Chi2
#-----------------------------------------------
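# The reduced chi2 values below use len(R) - 5 degrees of freedom: the
# number of radial bins minus the five fitted parameters
# (Vinf, Rc, beta_z, sig0, ksig).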
ar_med = Rc_med**2/(Rc_med**2+R**2)
sigr_med = sig0_med+ksig_med*R
vmod = (Vinf_med*R)*np.sin(math.pi/180*incl)/np.sqrt(Rc_med**2+R**2)
smod = np.sqrt(1 - betaz_med*np.cos(math.pi/180*incl)**2 + 0.5*(ar_med-1)*np.sin(math.pi/180*incl)**2)*sigr_med
chi2v = np.sum((vobs-vmod)**2/(evobs)**2)
chi2s = np.sum((sobs-smod)**2/(esobs)**2)
chi2v_red = chi2v/(len(R) - 5)
chi2s_red = chi2s/(len(R) - 5)
RES_v=np.median(np.abs( (vobs/vmod) -1 ))
RES_s=np.median(np.abs( (sobs/smod) -1 ))
vschi2=[chi2v,chi2s]
vschi2_red=[chi2v_red,chi2s_red]
vsRES=[RES_v,RES_s]
print 'Chi2v_red=', chi2v_red
print 'Chi2s_red=', chi2s_red
print 'RES_v=',RES_v
print 'RES_s=',RES_s
table2 = Table([vschi2,vschi2_red,vsRES], names = ('vschi2','vschi2_red','vsRES'))
table2.write("data_output/Tables/Chi2/"+gal+"_chi2.txt",format="ascii.tab",delimiter=",")
print 'Print table chi2'
#stop()
return table, burn_chains, burn_lns, burn_flatchains, burn_flatlns, \
final_chains, final_lns, final_flatchains, final_flatlns,chi2v,chi2s,chi2v_red,chi2s_red,vmod,smod
class ADC_MOC_pw(object):
def __init__(self,gal, incl, Vinf_in, Rc_in,sig0_in,ksig_in,R,vobs,evobs,sobs,esobs,I0obs,spobs,nwalks, burn_steps, steps, threads):
if nwalks < 10:
print("NWALKERS must be equal or greater than twice the dimension)")
nwalks = 10
# Galaxy parameters
self.gal = gal
self.incl=incl
# guess for the velocity fitted parameters
self.Vinf_in= Vinf_in
self.Rc_in=Rc_in
self.sig0_in=sig0_in
self.ksig_in=ksig_in
# observables
self.R=R
self.vobs=vobs
self.evobs=evobs
self.sobs=sobs
self.esobs=esobs
# MGE parameters
self.I0obs=I0obs
self.spobs=spobs
# Run ADC with MCMC
self.table, self.burn_chains, self.burn_lns, self.burn_flatchains, self.burn_flatlns, \
self.final_chains, self.final_lns, self.final_flatchains, self.final_flatlns,\
self.chi2v,self.chi2s,self.chi2v_red,self.chi2s_red,self.vmod,self.smod=runadc(self.gal, self.incl, \
self.Vinf_in, self.Rc_in,self.sig0_in,self.ksig_in,self.R,self.vobs,self.evobs,self.sobs,\
self.esobs,self.I0obs,self.spobs, nwalks, burn_steps, steps, threads)
###########################################################################
def make_fig(self,incl):
fig = plt.figure(figsize=(10,6))
plt.subplot(3,2,1)
plt.title(self.gal)
#plt.plot(burn_chains[:,:,0].T,marker=".",lw=0,color="k")
plt.plot(self.burn_chains[:,:,0].T)
plt.ylabel(r'Chain for $v_{\infty}$')
plt.subplot(3,2,2)
plt.plot(self.burn_chains[:,:,1].T)
plt.ylabel(r'Chain for $R_c$')
plt.subplot(3,2,3)
plt.plot(self.burn_chains[:,:,2].T)
plt.ylabel(r'Chain for $\beta_z$')
plt.subplot(3,2,4)
plt.plot(self.burn_chains[:,:,3].T)
plt.ylabel(r'Chain for $\sigma_0$')
plt.subplot(3,2,5)
plt.plot(self.burn_chains[:,:,4].T)
plt.ylabel(r'Chain for $k_{\sigma}$')
plt.tight_layout() # This tightens up the spacing
plt.savefig("figures/Burn_in/"+self.gal+"_burnin.png")
plt.close()
#sel= np.where(final_flatlns + 10 > np.max(final_flatlns))
fig = plt.figure(figsize=(18,10))
plt.subplot(3,2,1)
plt.title(self.gal)
#plt.hist2d(self.final_chains[:,:,0].T,bins=40, norm=LogNorm(),cmap='gray')
plt.plot(self.final_chains[:,:,0].T)
#plt.plot(self.final_chains[:,:,0].T,marker=".",lw=0,color="#A9A9A9")
plt.ylabel(r'Chain for $v_{\infty}$')
plt.subplot(3,2,2)
plt.plot(self.final_chains[:,:,1].T)
plt.ylabel(r'Chain for $R_c$')
plt.subplot(3,2,3)
plt.plot(self.final_chains[:,:,2].T)
plt.ylabel(r'Chain for $\beta_z$')
plt.subplot(3,2,4)
plt.plot(self.final_chains[:,:,3].T)
plt.ylabel(r'Chain for $\sigma_0$')
plt.subplot(3,2,5)
plt.plot(self.final_chains[:,:,4].T)
plt.ylabel(r'Chain for $k_{\sigma}$')
plt.tight_layout() # This tightens up the spacing
plt.savefig("figures/Chain/"+self.gal+"_chain.png")
plt.close()
#sel= np.where(self.final_flatlns + 10 > np.max(self.final_flatlns))
figure=corner.corner(self.final_flatchains, labels=["$v_{\infty}$", "$R_c$", r'$\beta_{z}^{ADC}$',
"$\sigma_{0}$", "$k_{\sigma}$"], quantiles=[0.25, 0.50, 0.75],show_titles=True, title_fmt=".3f",title_args={"fontsize": 12} )
figure.gca().annotate(self.gal+' ADC Power-law', xy=(0.5, 1.0), xycoords="figure fraction", xytext=(0, -5),
textcoords="offset points", ha="center", va="top")
figure.savefig("figures/Corner/"+self.gal+"_corner.png")
plt.close()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def make_fig_curves(self, incl):
R=self.R
Vinf_dist = self.final_flatchains[:,0]
Rc_dist = self.final_flatchains[:,1]
betaz_dist = self.final_flatchains[:,2]
sig0_dist = self.final_flatchains[:,3]
ksig_dist = self.final_flatchains[:,4]
# observables
vobs=self.vobs
evobs=self.evobs
sobs=self.sobs
esobs=self.esobs
# MGE parameters
I0obs=self.I0obs
spobs=self.spobs
Vphi_mod = np.zeros([len(R),len(self.final_flatchains[:,0])])
alphar_mod = np.zeros([len(R),len(self.final_flatchains[:,0])])
sigr_mod = np.zeros([len(R),len(self.final_flatchains[:,0])])
dlnSigR2_dlnR_mod = np.zeros([len(R),len(self.final_flatchains[:,0])])
Vphi_obs = np.zeros([len(R),len(self.final_flatchains[:,0])])
sigr_obs = np.zeros([len(R),len(self.final_flatchains[:,0])])
sobs_fin= np.zeros([len(R),len(self.final_flatchains[:,0])])
vobs_fin= np.zeros([len(R),len(self.final_flatchains[:,0])])
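# Model radial profiles evaluated over the full posterior sample:
#   Vphi(R)  = Vinf * R / sqrt(R^2 + Rc^2)         intrinsic azimuthal velocity
#   alpha(R) = Rc^2 / (R^2 + Rc^2)
#   sigR(R)  = sig0 + ksig * R                      radial velocity dispersion
#   dln(sigR^2)/dlnR = 2*ksig*R / (sig0 + ksig*R)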
for j in range(len(R)):
Vphi_mod[j,:] = Vinf_dist*R[j]/np.sqrt(R[j]**2+Rc_dist**2)
alphar_mod[j,:] = Rc_dist**2/(R[j]**2+Rc_dist**2)
sigr_mod[j,:] = sig0_dist + ksig_dist*R[j]
dlnSigR2_dlnR_mod[j,:] = 2*ksig_dist*R[j]/(sig0_dist + ksig_dist*R[j])
for j in range(len(R)):
Vphi_obs[j,:] = vobs[j]/np.sin(math.pi/180.*incl)
sigr_obs[j,:] = sobs[j]/np.sqrt(1 - \
betaz_dist*np.cos(math.pi/180*incl)**2 + \
0.5*(Rc_dist**2/(R[j]**2+Rc_dist**2)-1)*np.sin(math.pi/180*incl)**2)
sobs_fin[j,:] = (sig0_dist + ksig_dist*R[j])*np.sqrt(1 - \
betaz_dist*np.cos(math.pi/180*incl)**2 + \
0.5*(Rc_dist**2/(R[j]**2+Rc_dist**2)-1)*np.sin(math.pi/180*incl)**2)
vobs_fin[j,:] = Vinf_dist*R[j]*np.sin(math.pi/180.*incl)/np.sqrt(Rc_dist**2+R[j]**2)
# Itot = I(R), dItot = sum(I0j*exp(-0.5R^2/spj^2)*(-R/spj^2))
Itot = np.zeros(len(R))
dItot = np.zeros(len(R))
for j in range(len(R)):
for i in range(len(I0obs)):
Itot[j] = Itot[j] + I0obs[i]*np.exp(-0.5*R[j]**2/(spobs[i]**2))
dItot[j] = dItot[j] + (-R[j]/(spobs[i]**2))*I0obs[i]*np.exp(-0.5*R[j]**2/(spobs[i]**2))
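# Logarithmic surface-brightness slope: dln(I)/dln(R) = (R/I) * dI/dR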
dlnI_dlnR = R*dItot/Itot
dlnI_dlnRs = np.tile(dlnI_dlnR,(Vphi_mod.shape[1],1))
dlnI_dlnRs = dlnI_dlnRs.T
#===========================
# %&%&%&%&%&%&%&%&%
# Final ADC
# %&%&%&%&%&%&%&%&%
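# Thin-disk asymmetric drift correction as implemented here:
#   Vc^2 = Vphi^2 + sigR^2 * [ -dlnI/dlnR - dln(sigR^2)/dlnR - 0.5*(1 - alpha) ]
# computed once from the pure model profiles (Vc_mod) and once from the
# deprojected observed profiles combined with the model gradients (Vc_obmod).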
# From model...
Vc2_mod = Vphi_mod**2 + sigr_mod**2*(-dlnI_dlnRs - dlnSigR2_dlnR_mod - 0.5*(1-alphar_mod))
Vc_mod = np.sqrt(Vc2_mod)
# From observation + model...
Vc2_obmod = Vphi_obs**2 + sigr_obs**2*(-dlnI_dlnRs - dlnSigR2_dlnR_mod - 0.5*(1-alphar_mod))
Vc_obmod = np.sqrt(Vc2_obmod)
Vc2_oblit = Vphi_obs**2 + sigr_obs**2
#stop()
############################# PLOT FIGURES ###############################################
fig = plt.figure(figsize=(10,6))
plt.plot(R,np.median(Vc_obmod, axis = 1), 'bo')
eVctop0 = np.percentile(Vc_obmod, 75, axis = 1) - np.median(Vc_obmod, axis = 1)
eVcbot0 = np.median(Vc_obmod, axis = 1) - np.percentile(Vc_obmod, 25, axis = 1)
plt.errorbar(R,np.median(Vc_obmod, axis = 1), yerr = (eVctop0, eVcbot0), color = 'b' )
plt.plot(R,np.median(Vc_mod, axis = 1), 'ro', label='ADC')
eVctop = np.percentile(Vc_mod, 75, axis = 1) - np.median(Vc_mod, axis = 1)
eVcbot = np.median(Vc_mod, axis = 1) - np.percentile(Vc_mod, 25, axis = 1)
plt.errorbar(R,np.median(Vc_mod, axis = 1), yerr = (eVctop, eVcbot), color = 'r' )
#---------------------------------------------------
plt.xlabel('R [arcsec]')
plt.ylabel('$V_{c,ADC}$')
plt.savefig("figures/Vcirc/"+self.gal+"_Vcirc.png")
plt.close()
#######################################
#... save Vcirc in a file
np.savez('data_output/Vcirc/Vc_'+self.gal, rad=R, vcirc_med=Vc_mod, vcirc_up=eVctop, vcirc_dn=eVcbot)
######################################
#... Vphi profiles
fig = plt.figure(figsize=(10,6))
plt.plot(R,np.median(Vphi_obs, axis=1), 'bo')
eVctop1 = np.percentile(Vphi_obs, 75, axis = 1) - np.median(Vphi_obs, axis = 1)
eVcbot1 = np.median(Vphi_obs, axis = 1) - np.percentile(Vphi_obs, 25, axis = 1)
plt.errorbar(R,np.median(Vphi_obs, axis = 1), yerr = (eVctop1, eVcbot1), color = 'b' )
plt.plot(R,np.median(Vphi_mod,axis=1), 'r-')
eVctop2 = np.percentile(Vphi_mod, 75, axis = 1) - np.median(Vphi_mod, axis = 1)
eVcbot2 = np.median(Vphi_mod, axis = 1) - np.percentile(Vphi_mod, 25, axis = 1)
plt.errorbar(R,np.median(Vphi_mod, axis = 1), yerr = (eVctop2, eVcbot2), color = 'r' )
plt.plot(R,vobs, 'g-')
plt.errorbar(R,vobs, yerr = (evobs, evobs), color = 'g' )
plt.xlabel('R [arcsec]')
plt.ylabel('$v_{\phi}$')
plt.savefig("figures/Vphi/"+self.gal+"_Vphi.png")
plt.close()
#... SigR profiles
fig = plt.figure(figsize=(10,6))
plt.plot(R,np.median(sigr_obs, axis=1), 'o')
eVctop3 = np.percentile(sigr_obs, 75, axis = 1) - np.median(sigr_obs, axis = 1)
eVcbot3 = np.median(sigr_obs, axis = 1) - np.percentile(sigr_obs, 25, axis = 1)
plt.errorbar(R,np.median(sigr_obs, axis = 1), yerr = (eVctop3, eVcbot3), color = 'b' )
plt.plot(R,np.median(sigr_mod,axis=1), 'ro')
eVctop4 = np.percentile(sigr_mod, 75, axis = 1) - np.median(sigr_mod, axis = 1)
eVcbot4 = np.median(sigr_mod, axis = 1) - np.percentile(sigr_mod, 25, axis = 1)
plt.errorbar(R,np.median(sigr_mod, axis = 1), yerr = (eVctop4, eVcbot4), color = 'r' )
plt.xlabel('R [arcsec]')
plt.ylabel('$\sigma_{R}$')
plt.savefig("figures/SgR/"+self.gal+"_SgR.png")
plt.close()
#... Sobs profiles
fig = plt.figure(figsize=(10,6))
plt.plot(R,sobs, 'go')
plt.errorbar(R,sobs, yerr = (esobs, esobs), color = 'g' )
plt.plot(R,np.median(sobs_fin,axis=1), 'ro')
eVctop5 = np.percentile(sobs_fin, 75, axis = 1) - np.median(sobs_fin, axis = 1)
eVcbot5 = np.median(sobs_fin, axis = 1) - np.percentile(sobs_fin, 25, axis = 1)
plt.errorbar(R,np.median(sobs_fin, axis = 1), yerr = (eVctop5, eVcbot5), color = 'r' )
plt.xlabel('R [arcsec]')
plt.ylabel('$\sigma_{obs}$')
plt.savefig("figures/Sobs/"+self.gal+"_Sobs.png")
plt.close()
#...Vobs_profile
fig = plt.figure(figsize=(10,6))
plt.plot(R,vobs, 'go')
plt.errorbar(R,vobs, yerr = (evobs, evobs), color = 'g' )
plt.plot(R,np.median(vobs_fin,axis=1), 'ro')
eVctop6 = np.percentile(vobs_fin, 75, axis = 1) - np.median(vobs_fin, axis = 1)
eVcbot6 = np.median(vobs_fin, axis = 1) - np.percentile(vobs_fin, 25, axis = 1)
plt.errorbar(R,np.median(vobs_fin, axis = 1), yerr = (eVctop6, eVcbot6), color = 'r' )
plt.xlabel('R [arcsec]')
plt.ylabel('$V_{obs}$')
plt.savefig("figures/Vobs/"+self.gal+"_Vobs.png")
plt.close()
########################################################################################
print 'Plotting done!'
print 'Save in files'
#... save Vcirc in a file
np.savez('data_output/Vcirc/Vc_'+self.gal, \
#...V and S observed values
R=R, vobs=vobs, evobs=evobs, sobs=sobs, esobs=esobs,\
#...V and S Modeled values
vobs_fin=vobs_fin, eVctop6=eVctop6, eVcbot6=eVcbot6,\
sobs_fin=sobs_fin, eVctop5=eVctop5, eVcbot5=eVcbot5,\
#...Vph and SigR observed
sigr_obs=sigr_obs, eVctop3=eVctop3, eVcbot3=eVcbot3,\
Vphi_obs=Vphi_obs, eVctop1=eVctop1, eVcbot1=eVcbot1,\
#...Vph and SigR modeled
sigr_mod=sigr_mod, eVctop4=eVctop4, eVcbot4=eVcbot4,\
Vphi_mod=Vphi_mod, eVctop2=eVctop2, eVcbot2=eVcbot2,\
#...Vc_ADC observed
Vc_obmod=Vc_obmod, eVctop0=eVctop0, eVcbot0=eVcbot0,\
# Vc_ADC modeled
Vc_mod=Vc_mod, eVctop=eVctop, eVcbot=eVcbot)
| mit |
belltailjp/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
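# The consensus score compares the biclusters found by the model with the
# planted ones; a value of 1.0 means they were recovered exactly.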
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |