repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/tests/test_skew.py | 10 | 6939 | """
Testing that skewed axes properly work
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
import six
from nose.tools import assert_true
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup, image_comparison
from matplotlib.axes import Axes
import matplotlib.transforms as transforms
import matplotlib.axis as maxis
import matplotlib.spines as mspines
import matplotlib.path as mpath
import matplotlib.patches as mpatch
from matplotlib.projections import register_projection
# The sole purpose of this class is to look at the upper, lower, or total
# interval as appropriate and see what parts of the tick to draw, if any.
class SkewXTick(maxis.XTick):
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__name__)
lower_interval = self.axes.xaxis.lower_interval
upper_interval = self.axes.xaxis.upper_interval
if self.gridOn and transforms.interval_contains(
self.axes.xaxis.get_view_interval(), self.get_loc()):
self.gridline.draw(renderer)
if transforms.interval_contains(lower_interval, self.get_loc()):
if self.tick1On:
self.tick1line.draw(renderer)
if self.label1On:
self.label1.draw(renderer)
if transforms.interval_contains(upper_interval, self.get_loc()):
if self.tick2On:
self.tick2line.draw(renderer)
if self.label2On:
self.label2.draw(renderer)
renderer.close_group(self.__name__)
# This class exists to provide two separate sets of intervals to the tick,
# as well as create instances of the custom tick
class SkewXAxis(maxis.XAxis):
def __init__(self, *args, **kwargs):
maxis.XAxis.__init__(self, *args, **kwargs)
self.upper_interval = 0.0, 1.0
def _get_tick(self, major):
return SkewXTick(self.axes, 0, '', major=major)
@property
def lower_interval(self):
return self.axes.viewLim.intervalx
def get_view_interval(self):
return self.upper_interval[0], self.axes.viewLim.intervalx[1]
# This class exists to calculate the separate data range of the
# upper X-axis and draw the spine there. It also provides this range
# to the X-axis artist for ticking and gridlines
class SkewSpine(mspines.Spine):
def __init__(self, axes, spine_type):
if spine_type == 'bottom':
loc = 0.0
else:
loc = 1.0
mspines.Spine.__init__(self, axes, spine_type,
mpath.Path([(13, loc), (13, loc)]))
def _adjust_location(self):
trans = self.axes.transDataToAxes.inverted()
if self.spine_type == 'top':
yloc = 1.0
else:
yloc = 0.0
left = trans.transform_point((0.0, yloc))[0]
right = trans.transform_point((1.0, yloc))[0]
pts = self._path.vertices
pts[0, 0] = left
pts[1, 0] = right
self.axis.upper_interval = (left, right)
# This class handles registration of the skew-xaxes as a projection as well
# as setting up the appropriate transformations. It also overrides standard
# spines and axes instances as appropriate.
class SkewXAxes(Axes):
# The projection must specify a name. This will be used by the
# user to select the projection, i.e. ``subplot(111,
# projection='skewx')``.
name = 'skewx'
def _init_axis(self):
#Taken from Axes and modified to use our modified X-axis
self.xaxis = SkewXAxis(self)
self.spines['top'].register_axis(self.xaxis)
self.spines['bottom'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
def _gen_axes_spines(self):
spines = {'top': SkewSpine(self, 'top'),
'bottom': mspines.Spine.linear_spine(self, 'bottom'),
'left': mspines.Spine.linear_spine(self, 'left'),
'right': mspines.Spine.linear_spine(self, 'right')}
return spines
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
rot = 30
#Get the standard transform setup from the Axes base class
Axes._set_lim_and_transforms(self)
# Need to put the skew in the middle, after the scale and limits,
# but before the transAxes. This way, the skew is done in Axes
# coordinates thus performing the transform around the proper origin
# We keep the pre-transAxes transform around for other users, like the
# spines for finding bounds
self.transDataToAxes = (self.transScale +
(self.transLimits +
transforms.Affine2D().skew_deg(rot, 0)))
# Create the full transform from Data to Pixels
self.transData = self.transDataToAxes + self.transAxes
# Blended transforms like this need to have the skewing applied using
# both axes, in axes coords like before.
self._xaxis_transform = (transforms.blended_transform_factory(
self.transScale + self.transLimits,
transforms.IdentityTransform()) +
transforms.Affine2D().skew_deg(rot, 0)) + self.transAxes
# Now register the projection with matplotlib so the user can select
# it.
register_projection(SkewXAxes)
@image_comparison(baseline_images=['skew_axes'], remove_text=True)
def test_set_line_coll_dash_image():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='skewx')
ax.set_xlim(-50, 50)
ax.set_ylim(50, -50)
ax.grid(True)
# An example of a slanted line at constant X
l = ax.axvline(0, color='b')
@image_comparison(baseline_images=['skew_rects'], remove_text=True)
def test_skew_rectangle():
fig, axes = plt.subplots(5, 5, sharex=True, sharey=True, figsize=(16, 12))
axes = axes.flat
rotations = list(itertools.product([-3, -1, 0, 1, 3], repeat=2))
axes[0].set_xlim([-4, 4])
axes[0].set_ylim([-4, 4])
axes[0].set_aspect('equal')
for ax, (xrots, yrots) in zip(axes, rotations):
xdeg, ydeg = 45 * xrots, 45 * yrots
t = transforms.Affine2D().skew_deg(xdeg, ydeg)
ax.set_title('Skew of {0} in X and {1} in Y'.format(xdeg, ydeg))
ax.add_patch(mpatch.Rectangle([-1, -1], 2, 2,
transform=t + ax.transData,
alpha=0.5, facecolor='coral'))
plt.subplots_adjust(wspace=0, left=0, right=1, bottom=0)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| gpl-2.0 |
anewmark/galaxy_dark_matter | lin-log_test.py | 1 | 7446 | print('Testing Lin v Log')
test=2
if test==1:
import astropy.table as table
import numpy as np
from defcuts import *
from defflags import *
from halflight_first import *
from def_get_mags import *
from def_halflight_math import *
bands=['g', 'r', 'i','z', 'y']
daperture=[1.01,1.51,2.02,3.02,4.03,5.71,8.40,11.8,16.8,23.5]
aperture=[x*0.5 for x in daperture]
ty='mean'
stax=True
if stax==False:
tag=''
else:
tag='uplim'
txtdist= 'Figure2'
txtslope='Figure1'
outdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/clumps/+LL'+ty+tag
doutdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/distribution/+LL'+ty+tag
Flags=['flags_pixel_bright_object_center', 'brobj_cen_flag-', 'No Bright Object Centers', 'Only Bright Object Centers', 'brobj_cen_flag']
indir='/Users/amandanewmark/repositories/galaxy_dark_matter/GAH/'
bigdata = table.Table.read(indir+ 'LOWZ_HSCGAMA15_apmgs+cmodmag.fits')
def do_cuts(datatab):
parm=['flags_pixel_saturated_center','flags_pixel_edge','flags_pixel_interpolated_center','flags_pixel_cr_center','flags_pixel_suspect_center', 'flags_pixel_clipped_any','flags_pixel_bad']
ne=[99.99, 199.99, 0.0]
mincut=0.1
maxcut=''
cutdata=not_cut(datatab, bands, 'mag_forced_cmodel', ne)
for b in range(0, len(bands)-1):
newdata=many_flags(cutdata, parm, bands[b]) #flags not in y?
cutdata=newdata
return newdata
def get_TF(data):
bandi=['i']
Flag, Not,lab= TFflag(bandi,Flags, data)
return Flag, Not
newdata=do_cuts(bigdata)
Flagdat, Notdat=get_TF(newdata)
def my_halflight2(dat1, sc=''):
loglum, lograd, loglumd= get_ind_lums(dat1, bands, aperture, scale='log')
if stax==True:
loglum, lograd, loglumd= upper_rad_cut(loglum, lograd, loglumd, 4, proof=False)
#print('length of radius array is ', len(lograd))
mloglum, mlogdens, mlograd, mlogerr= get_avg_lums(loglum, lograd, loglumd, gr=[1,80,11], type=ty, scale=sc)
logr12s= get_halflight(loglum, lograd)
logr12= get_halflight(mloglum, mlograd)
Ms, cs, errs= get_slopes(logr12s, lograd, loglumd, error=None, smax=stax)
M, c, logrcut, logldcut, sterr, errcut =get_slopes(logr12, mlograd, mlogdens, error=mlogerr, smax=stax)
print(sterr)
cutmlogld = M * logrcut + c
ind=[loglum, loglumd, lograd, logr12s]
means=[mloglum,mlogdens,mlograd,logr12, mlogerr]
ind_slope=[Ms, cs, errs]
mean_slopes=[M, c, logrcut, logldcut, cutmlogld, sterr, errcut]
#logrcut and logldcut are for lines of best fit
return ind, means, ind_slope, mean_slopes
inds1, means1, ind_slope1, mean_slopes1=my_halflight2(Flagdat, sc='lindata')
inds2, means2, ind_slope2, mean_slopes2=my_halflight2(Flagdat, sc='')
def my_graphs(inds1, means1, ind_slope1, mean_slopes1, inds2, means2, ind_slope2, mean_slopes2):
import matplotlib.pyplot as plt
import numpy as np
import math
#ind=[loglum, loglumd, lograd, logr12s]
#means=[mloglum,mlogdens,mlograd,logr12, mlogerr]
#ind_slope=[Ms, cs, errs]
#mean_slopes=[M, c, logrcut, logldcut, cutmlogld, sterr, errcut]
def lum_mult_fit(x1, x2, y1, y2, xcut1, xcut2, yfit1, yfit2, sterr1, sterr2 , m1, m2, error1, error2, outdir=''):
print('Make Scatter Plots')
f=plt.figure()
plt.scatter(x1, y1, color='r', marker='o',label='Linearly Averaged')
plt.plot(xcut1, yfit1, color='m', label='Linearly Averaged: slope= '+str(np.round(m1,2))+' +- '+str(np.round(sterr1,2)))
plt.errorbar(x1, y1, yerr=error1, fmt='.',color='r')
plt.scatter(x2, y2, color='b', marker='o',label='Log Averaged ')
plt.plot(xcut2, yfit2, color='c', label='Log Averaged: slope= '+str(np.round(m2,2))+' +- '+str(np.round(sterr2,2)))
plt.errorbar(x2, y2, yerr=error2, fmt='.',color='b')
plt.xlabel('Log Radii (kpc)')
plt.ylabel('Luminosity Densities (Lsolar/kpc^2)')
plt.title('Average Luminosity Densities v Radii')
#plt.xlim(math.log10(1), math.log10(80))
#plt.ylim(6,8.6)
plt.legend(loc=0,prop={'size':6.0})
f.text(0.05, 0.05, txtslope, color='red', weight='bold')
outdirs=outdir+tag+'TF.pdf'
#plt.show()
f.savefig(outdirs)
print(outdirs)
def dist_mean(m1s, m2s, m1, m2, sterr1, sterr2, KS=False):
figs=plt.figure()
bs=np.linspace(-2.0,-1.4,num=15, endpoint=False)
n1, b1, p1= plt.hist(m1s, bs, color='red', label='Linearly Averaged ('+str(len(m1s))+')', alpha=0.8)
n2, b2, p2= plt.hist(m2s,bs, color='blue', label='Log Averaged ('+str(len(m2s))+')', alpha=0.8)
ts=''
if KS==True:
M=m1s+m2s
import scipy
D, p=scipy.stats.ks_2samp(m1s,m2s)
plt.plot(0,0, c='green', marker='*', label='K-S test is '+str(D))
plt.xlim(np.min(M),-1.4)
ts='KS'
#print('Standard Deviation (Not Flagged): ', str(np.std(m1s)))
#print('Standard Deviation (Flagged): ', str(np.std(m2s)))
plt.axvline(x=m1, color='magenta', label='Linearly Averaged: slope= '+str(np.round(m1,2))+' +- ' +str(np.round(sterr1,2)))
plt.axvline(x=m2, color='cyan', label='Log Averaged: slope= '+str(np.round(m2,2))+' +- '+str(np.round(sterr2,2)))
plt.xlabel('Slopes', fontsize=10)
plt.legend(loc=0,prop={'size':6.5})
plt.ylabel('Frequency', fontsize=10)
plt.title('With '+ty+' Slopes')
outdirs=doutdir+'slopedist.pdf'
#figs.text(0.03, 0.03, txtdist, color='red', weight='bold')
#plt.show()
figs.savefig(outdirs)
print(outdirs)
def all_lumprof(lum1s, lum2s, rad1s, rad2s, mrad1, mrad2, mden1, mden2, error1, error2):
f=plt.figure()
#print(len(mrad1)) #these are the mean radii
#print(len(mrad2))
#print(len(mden1))
#print(len(mden2))
for n in range(len(lum1s)):
plt.plot(rad1s[n], lum1s[n],color='lightgrey', marker='.')
for n in range(len(lum2s)):
plt.plot(rad2s[n], lum2s[n],color='lightgrey', marker='.')
plt.scatter(mrad1, mden1, color='red', marker='o',label='Linearly Averaged ('+str(len(lum1s))+')', zorder=3)
plt.scatter(mrad2,mden2,color='blue', marker='o',label='Log Averaged ('+str(len(lum1s))+')', zorder=3)
plt.xlabel('Log Radii (kpc)')
plt.ylabel('Luminosity Densities (Lsolar/kpc^2)')
plt.title('Average Luminosity Densities v Radii')
plt.legend(loc=0,prop={'size':6.0})
outdirs=outdir+tag+'all_lumprof.pdf'
#plt.show()
f.savefig(outdirs)
print(outdirs)
dist_mean(ind_slope1[0],ind_slope2[0],mean_slopes1[0],mean_slopes2[0],mean_slopes1[5], mean_slopes2[5], KS=False)
all_lumprof(inds1[1], inds2[1], inds1[2], inds2[2], means1[2], means2[2], means1[1], means2[1],means1[4], means2[4])
lum_mult_fit(means1[2], means2[2], means1[1], means2[1], mean_slopes1[2], mean_slopes2[2], mean_slopes1[4], mean_slopes2[4], mean_slopes1[5], mean_slopes2[5], mean_slopes1[0], mean_slopes2[0],means1[4], means2[4], outdir=outdir)
my_graphs(inds1, means1, ind_slope1, mean_slopes1, inds2, means2, ind_slope2, mean_slopes2)
else:
from halflight_second import meanlum2
import numpy as np
import matplotlib.pyplot as plt
Naps=0
L=np.array([7.5, 8.0, 8.5, 9.0, 8.5,7.0, 8.5])
R=np.array([1,2,3,3,4,0,2.5])
mL, mR, bb=meanlum2(L, R, Naps,grange=[10**0.8,10**3.5,4],scale='lindata')
mL1, mR1, bb1=meanlum2(L, R, Naps,grange=[10**0.8,10**3.5,4],scale='')
print('Lums', mL, mL1)
print('Rads', mR, mR1)
plt.scatter(mR, mL, color='red', label='Averaged Linearly')
plt.scatter(mR1, mL1, color='blue', label='Averaged on Log scale')
plt.xlabel('Log Radii')
plt.ylabel('Log Luminosity')
plt.legend(loc=0,prop={'size':6.0})
plt.show()
| mit |
ruishihan/R7-with-notes | src/host/python/GSMTimingSync.py | 2 | 1162 | import numpy as np
from ctypes import *
import time
import GSM
from sptools import findFreq
from GSMSync import GSMSync
import matplotlib.pyplot as plt
class GSMTimingSync(GSMSync):
def __init__(self,f,url='http://192.168.1.110:8080/'):
GSMSync.__init__(self,f,url)
self.sb = GSM.SB()
self.osr = self.fc/GSM.symbolrate
def once( self ):
blk = self.fl/16
rfd,start = self.getRfData(self.fl-blk,blk*3)
nprfd = self.short2Complex(rfd)
f = np.abs(self.sb.channelEst(nprfd,self.osr))
for i in range(1,5):
rfd,start = self.getRfData(i*10*self.fl+self.fl-blk,blk*3)
nprfd = self.short2Complex(rfd)
f += np.abs(self.sb.channelEst(nprfd,self.osr))
inx = int(f.argmax()-blk-42*self.osr)
plt.plot(f)
return inx
def sync(self):
self.waitClockStable()
ff = self.once()
fs = self.getFrameStart()
self.setFrameStart(fs+ff/2)
return ff
def main():
fs = GSMTimingSync(1.92e6)
f0 = fs.sync()
f1 = 0.
while abs(f0)>5:
if abs(f0)>fs.fl/16:
print f0,fs.fl/16
return -1
print "Timing sync:",f0
time.sleep(1)
f0 = fs.sync()
print "Timing sync:",f0
return abs(f0)
if __name__ == '__main__':
fs = main()
| apache-2.0 |
mikaem/spectralDNS | tests/OrrSommerfeld.py | 4 | 8935 | """Orr-Sommerfeld"""
import warnings
from numpy import real, pi, exp, zeros, imag, sqrt, log10, sum
from spectralDNS import config, get_solver, solve
from spectralDNS.utilities import dx
#from spectralDNS.utilities import reset_profile
from OrrSommerfeld_shen import OrrSommerfeld
try:
import matplotlib.pyplot as plt
import matplotlib.cbook
warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation)
except ImportError:
warnings.warn("matplotlib not installed")
plt = None
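# initOS (below) seeds the velocity field with plane Poiseuille flow plus a small
# traveling-wave perturbation built from the Orr-Sommerfeld eigenfunction:
# U[1] = (1 - x**2) + eps*Re(dphi/dy * exp(1j*(y - c*t))) and
# U[0] = -eps*Re(1j*phi * exp(1j*(y - c*t))), with c = OS.eigval and
# eps = config.params.eps.  The growth factor exp(2*Im(c)*t) is what
# compute_error later uses as the exact reference.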
def initOS(OS, eigvals, eigvectors, U, X, t=0.):
x = X[0][:, 0, 0]
eigval, phi, dphidy = OS.interp(x, eigvals, eigvectors, eigval=1, verbose=False)
OS.eigval = eigval
for j in range(U.shape[2]):
y = X[1][0, j, 0]
v = (1-x**2) + config.params.eps*real(dphidy*exp(1j*(y-eigval*t)))
u = -config.params.eps*real(1j*phi*exp(1j*(y-eigval*t)))
U[0, :, j, :] = u.repeat(U.shape[3]).reshape((len(x), U.shape[3]))
U[1, :, j, :] = v.repeat(U.shape[3]).reshape((len(x), U.shape[3]))
U[2] = 0
acc = zeros(1)
OS, e0 = None, None
def initialize(solver, context):
global OS, e0
params = config.params
OS = OrrSommerfeld(Re=params.Re, N=128)
eigvals, eigvectors = OS.solve(False)
OS.eigvals, OS.eigvectors = eigvals, eigvectors
U = context.U
X = context.X
FST = context.FST
initOS(OS, eigvals, eigvectors, U, X)
U_hat = solver.set_velocity(**context)
U = solver.get_velocity(**context)
# Compute convection from data in context (i.e., context.U_hat and context.g)
# This is the convection at t=0
e0 = 0.5*dx(U[0]**2+(U[1]-(1-X[0]**2))**2, FST)
acc[0] = 0.0
if 'RK3' not in params.solver:
# Initialize at t = dt
context.H_hat1[:] = solver.get_convection(**context)
context.U_hat0[:] = U_hat
context.U0[:] = U
initOS(OS, eigvals, eigvectors, U, X, t=params.dt)
U_hat = solver.set_velocity(**context)
U = solver.get_velocity(**context)
params.t = params.dt
params.tstep = 1
e1 = 0.5*dx(U[0]**2+(U[1]-(1-X[0]**2))**2, FST)
if solver.rank == 0:
acc[0] += abs(e1/e0 - exp(2*imag(OS.eigval)*params.t))*params.dt
else:
params.t = 0
params.tstep = 0
if not ("KMM" in params.solver or "Coupled" in params.solver):
P_hat = solver.compute_pressure(**context)
P = P_hat.backward(context.P)
if params.convection == 'Vortex':
P += 0.5*sum(U**2, axis=0)
P_hat = context.P.forward(P_hat)
else:
try:
context.g[:] = 0
except AttributeError:
pass
def set_Source(Source, Sk, FST, ST, N, **kw):
Source[:] = 0
Source[1] = -2./config.params.Re
Sk[:] = 0
Sk[1] = FST.scalar_product(Source[1], Sk[1])
Sk[1, -2:, 0, 0] = 0
im1, im2, im3, im4 = (None, )*4
def update(context):
c = context
params = config.params
solver = config.solver
#if params.tstep == 2: reset_profile(profile)
if (params.tstep % params.plot_step == 0 or
params.tstep % params.compute_energy == 0):
U = solver.get_velocity(**context)
global im1, im2, im3, OS, e0, acc
if plt is not None:
if im1 is None and solver.rank == 0 and params.plot_step > 0:
plt.figure()
im1 = plt.contourf(c.X[1][:, :, 0], c.X[0][:, :, 0], c.U[0, :, :, 0], 100)
plt.colorbar(im1)
plt.draw()
plt.figure()
im2 = plt.contourf(c.X[1][:, :, 0], c.X[0][:, :, 0], c.U[1, :, :, 0] - (1-c.X[0][:, :, 0]**2), 100)
plt.colorbar(im2)
plt.draw()
plt.figure()
im3 = plt.quiver(c.X[1][:, :, 0], c.X[0][:, :, 0], c.U[1, :, :, 0]-(1-c.X[0][:, :, 0]**2), c.U[0, :, :, 0])
plt.draw()
plt.pause(1e-6)
if params.tstep % params.plot_step == 0 and solver.rank == 0 and params.plot_step > 0:
im1.ax.clear()
im1.ax.contourf(c.X[1][:, :, 0], c.X[0][:, :, 0], U[0, :, :, 0], 100)
im1.autoscale()
im2.ax.clear()
im2.ax.contourf(c.X[1][:, :, 0], c.X[0][:, :, 0], U[1, :, :, 0]-(1-c.X[0][:, :, 0]**2), 100)
im2.autoscale()
im3.set_UVC(U[1, :, :, 0]-(1-c.X[0][:, :, 0]**2), U[0, :, :, 0])
plt.pause(1e-6)
if params.tstep % params.compute_energy == 0:
e1, e2, exact = compute_error(c)
div_u = solver.get_divergence(**c)
e3 = dx(div_u**2, c.FST)
if solver.rank == 0 and not config.params.spatial_refinement_test:
acc[0] += abs(e1/e0-exact)*params.dt
#acc[0] += sqrt(e2)
print("Time %2.5f Norms %2.16e %2.16e %2.16e %2.16e %2.16e" %(params.t, e1/e0, exact, e1/e0-exact, sqrt(e2), sqrt(e3)))
def compute_error(context):
global OS, e0, acc
c = context
params = config.params
solver = config.solver
U = solver.get_velocity(**c)
pert = (U[1] - (1-c.X[0]**2))**2 + U[0]**2
e1 = 0.5*dx(pert, c.FST)
exact = exp(2*imag(OS.eigval)*params.t)
U0 = c.work[(c.U, 0, True)]
initOS(OS, OS.eigvals, OS.eigvectors, U0, c.X, t=params.t)
pert = (U[0] - U0[0])**2 + (U[1]-U0[1])**2
#pert = (U[1] - U0[1])**2
e2 = 0.5*dx(pert, c.FST)
return e1, e2, exact
def regression_test(context):
_, e2, _ = compute_error(context)
if config.solver.rank == 0:
assert sqrt(e2) < 1e-12
def refinement_test(context):
_, e2, _ = compute_error(context)
if config.solver.rank == 0:
print("Computed error = %2.8e %2.8e %2.8e" %(sqrt(e2)/config.params.eps, config.params.dt, config.params.eps))
#print("Computed error = %2.8e %2.8e %2.8e" %(acc[0], config.params.dt, config.params.eps))
def eps_refinement_test(context):
e1, e2, exact = compute_error(context)
if config.solver.rank == 0:
print(r" %2d & %2.8e & %2.8e \\\ " %(-int(log10(config.params.eps)), sqrt(e2)/config.params.eps, e1/e0-exact))
def spatial_refinement_test(context):
_, e2, _ = compute_error(context)
if config.solver.rank == 0:
print(r" %2d & %2.8e & %2.8e \\\ " %(2**config.params.M[0], sqrt(e2)/config.params.eps, acc[0]))
if __name__ == "__main__":
config.update(
{'Re': 8000.,
'nu': 1./8000., # Viscosity
'dt': 0.001, # Time step
'T': 0.01, # End time
'L': [2, 2*pi, pi],
'M': [7, 5, 2],
'Dquad': 'GC',
'Bquad': 'GC',
'dealias': None,
}, "channel"
)
config.channel.add_argument("--compute_energy", type=int, default=1)
config.channel.add_argument("--plot_step", type=int, default=1)
config.channel.add_argument("--refinement_test", type=bool, default=False)
config.channel.add_argument("--eps_refinement_test", type=bool, default=False)
config.channel.add_argument("--spatial_refinement_test", type=bool, default=False)
config.channel.add_argument("--eps", type=float, default=1e-7)
#solver = get_solver(update=update, regression_test=regression_test, mesh="channel")
solver = get_solver(update=update, mesh="channel")
if config.params.eps_refinement_test:
print("eps refinement-test")
solver.update = lambda x: None
solver.regression_test = eps_refinement_test
config.params.verbose = False
context = solver.get_context()
for eps in [1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10, 1e-11, 1e-12]:
config.params.eps = eps
initialize(solver, context)
set_Source(**context)
solve(solver, context)
elif config.params.spatial_refinement_test:
print("spatial refinement-test")
def update_(con):
e1, _, exact = compute_error(con)
acc[0] += abs(e1/e0-exact)
solver.update = update_
solver.regression_test = spatial_refinement_test
config.params.verbose = False
for M in [4, 5, 6, 7, 8]:
config.params.M = [M, 3, 2]
context = solver.get_context()
initialize(solver, context)
set_Source(**context)
solve(solver, context)
else:
if config.params.refinement_test:
#solver.update = lambda x: None
def update_(con):
e1, _, exact = compute_error(con)
acc[0] += abs(e1/e0-exact)*config.params.dt
solver.update = update_
solver.regression_test = refinement_test
context = solver.get_context()
# Just store 2D slices for visualization
context.hdf5file.results['data'] = {'U': [(context.U[0], [slice(None), slice(None), 0])],
'V': [(context.U[1], [slice(None), slice(None), 0])]}
initialize(solver, context)
set_Source(**context)
solve(solver, context)
| gpl-3.0 |
joshgabriel/dft-crossfilter | CompleteApp/prec_analysis/main.py | 2 | 27301 | import os
from os.path import dirname, join
from collections import OrderedDict
import pandas as pd
import numpy as np
import json
from bokeh.io import curdoc
from bokeh.layouts import row, widgetbox, column, gridplot, layout
from bokeh.models import Select, Div, Column, \
HoverTool, ColumnDataSource, Button, RadioButtonGroup,\
MultiSelect
#from bokeh.models.widgets import RangeSlider
from bokeh.plotting import figure
from bokeh import mpl
from precision.precisions import DatabaseData
import requests
print ('Does something after imports')
plottables = ['k-point', 'value', 'perc_precisions']
x_select = Select(title='X-Axis', value='k-point', options=plottables)
y_select = Select(title='Y-Axis', value='value', options=plottables)
############## Header Content from description.html #################
content_filename1 = join(dirname(__file__), "UserInstructions.html")
Desc_C1 = Div(text=open(content_filename1).read(),
render_as_text=False, width=600)
content_filename2 = join(dirname(__file__), "UserInstructions.html")
Desc_C2 = Div(text=open(content_filename2).read(),
render_as_text=False, width=600)
content_filename3 = join(dirname(__file__), "UserInstructions.html")
Desc_C3 = Div(text=open(content_filename1).read(),
render_as_text=False, width=600)
content_filename4 = join(dirname(__file__), "UserInstructions.html")
Desc_C4 = Div(text=open(content_filename2).read(),
render_as_text=False, width=600)
######### APP CROSSFILTER ##########################
# decide if all columns or crossfilter down to sub properties
#source_data = pd.DataFrame({})#ColumnDataSource(data=dict())
class CrossFiltDFs():
def __init__(self,query_dict={'code':'VASP','exchange':'PBE',\
'element':'Au','structure':'fcc','properties':'B'},plot_data=None):
self.query_dict = query_dict
self.plot_data = plot_data
def crossfilter_by_tag(self,df, tag):
"""
a crossfilter that can recursively update the unique options
in the UI based on prior selections
returns the dataframe crossfiltered by tag, e.g. {'element': 'Ag'}
"""
col,spec= list(tag.items())[0]
return df[df[col]==spec]
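# Illustrative chained use (hypothetical; not wired to any widget in this app):
# sub = self.crossfilter_by_tag(df, {'code': 'VASP'})
# sub = self.crossfilter_by_tag(sub, {'element': 'Ag'})
# remaining_structures = sub['structure'].unique()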
# The crossfilter widgets
def update(self, attr, old, new):
print ('Attribute', attr, 'OLD', old, 'NEW', new)
print ('executes here on update')#, exchange_df)
def update_code(self):
"""
update for the code selection
"""
print ('update code')
self.query_dict.update({'code':code.value})
def update_exchange(self):
"""
update the exchange
"""
print ('update exchange')
self.query_dict.update({'exchange':exchange.value})
def update_element(self):
print ('Updating element down selection for properties',element.value)
self.query_dict.update({'element':element.value})
def update_struct(self):
print ('Updating struct down selection for element')
self.query_dict.update({'structure':struct.value})
print ('Updating ptable with structure selection')
print ('finished callback to update layout')
def update_prop(self):
self.properties = prop.value
def update_kpoints(self):
pass
def update_x(self):
self.x = x.value
pass
def update_y(self):
self.y = y.value
pass
def update_range(self):
pass
def query_api(self,endpoint):
query_dict ={k:v for k,v in self.query_dict.items() if k!='properties'}
self.properties = self.query_dict['properties']
if self.properties == 'dB':
self.properties = 'BP'
r = requests.post(url='http://0.0.0.0:6400/bench/v1/query_{}'.\
format(endpoint),data=json.dumps(self.query_dict))
ListOfDicts = r.json()['content']
self.plot_data = pd.concat([pd.DataFrame({k:[ld[k]] for k in list(ld.keys())}) for ld in ListOfDicts])
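# Assumed response shape (illustration only; field names are examples): the
# endpoint returns JSON like {'content': [{'k-point': 8, 'value': ..., ...}, ...]},
# and the concat above turns that list of dicts into one DataFrame row per record.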
def plot_prec_value1(self):
"""
calls the plotting operation by querying the
evk endpoint and returning a single evk packet
of single material structure code exchange to
self.plot_data.
This controls the first plot canvas
"""
self.query_dict={'code':code.value,'exchange':exchange.value,\
'structure':struct.value,'element':element.value,'properties':prop.value}
print ('POSTING', self.query_dict)
self.query_api(endpoint='evk')
layout_doc.children[2].children[0] = self.plot_pade_figure()
def clear_crossfilter1(self):
"""
clear the figure and crossfilter
"""
print ('Trigger clear')
self.query_dict = {}
self.plot_data = None
self.create_figure_new()
layout_doc.children[2].children[0] = self.p
def plot_prec_value2(self):
"""
calls the plotting operation by querying the
evk endpoint and returning a single evk packet
of single material structure code exchange to
self.plot_data.
This controls the first plot canvas
"""
self.query_dict={'code':code2.value,'exchange':exchange2.value,\
'structure':struct2.value,'element':element2.value,'properties':prop2.value}
print ('POSTING', self.query_dict)
self.query_api(endpoint='evk')
layout_doc.children[2].children[1] = self.plot_pade_figure()
def clear_crossfilter2(self):
"""
clear the figure and crossfilter
"""
print ('Trigger clear')
self.query_dict = {}
self.plot_data = None
self.create_figure_new()
layout_doc.children[2].children[1] = self.p
def plot_prec_value3(self):
"""
calls the plotting operation by querying the
evk endpoint and returning a single evk packet
of single material structure code exchange to
self.plot_data.
This controls the first plot canvas
"""
self.query_dict={'code':code3.value,'exchange':exchange3.value,\
'structure':struct3.value,'element':element3.value,'properties':prop3.value}
print ('POSTING', self.query_dict)
self.query_api(endpoint='evk')
layout_doc.children[2].children[2] = self.plot_pade_figure()
def clear_crossfilter3(self):
"""
clear the figure and crossfilter
"""
print ('Trigger clear')
self.query_dict = {}
self.plot_data = None
self.create_figure_new()
layout_doc.children[2].children[2] = self.p
def plot_prec_value4(self):
"""
calls the plotting operation by querying the
evk endpoint and returning a single evk packet
of single material structure code exchange to
self.plot_data.
This controls the first plot canvas
"""
self.query_dict={'code':code4.value,'exchange':exchange4.value,\
'structure':struct4.value,'element':element4.value,'properties':prop4.value}
print ('POSTING', self.query_dict)
self.query_api(endpoint='evk')
layout_doc.children[2].children[3] = self.plot_pade_figure()
def clear_crossfilter4(self):
"""
clear the figure and crossfilter
"""
print ('Trigger clear')
self.query_dict = {}
self.plot_data = None
self.create_figure_new()
layout_doc.children[2].children[3] = self.p
def plot_pade_figure(self):
"""
method which plots multiple curves of different color
on the same bokeh figure canvas. Will receive query results from the evk
end point on the E0k, V0k, Bk, BPk, kpoints data. x is always kpoints data log scaled
"""
# receive a dict of datasets: {'Plot1':{'Dsett1':DFrame, 'x_title': None, 'y_title': None,
# 'Characteristic':'VASP_PBE_Al_fcc_B'}, 'Plot2':{'x':[],'y':[], 'x_title': None, 'y_title': None}}
#def color_marker_divider(characteristics):
# cm_keys= {'00':('red','*'),'01':('red','-.-'),'02':('red','*'),'03':('red','^'),\
# '10':('blue','*'),'11':('blue','-.-'),'12':('blue','*'),'13':('blue','^')
# }
# DictCharacters = \
# [{n:att for n,att in enumerate(c.split('_'))} for c in characteristics]
# one or two char value different and same code and exchange: same color different marker
# else different color and marker.
# return cm_keys
#color_marker_divider(characteristics)
#for dset in datasets:
data_analysis = DatabaseData(dataframe=self.plot_data)
print (data_analysis.dataframe.columns)
data_analysis.run_pade_through_R(rscript='./prec_analysis/birch',get_inits_ev=True)
data_analysis.create_precisions()
data_analysis.extract_pade_curve()
x_eos_kpts, y_eos, xs_err, ys_err, x_pade_kpts, y_pade = \
data_analysis.create_pade_bokeh_compat(properties=self.properties)
#c,m = color_marker_divider(characteristics)['00']
print (type(self.properties), self.properties)
if self.properties == 'B':
ext = data_analysis.Bp
print ('HERE AT PROPERTIES', ext, type(ext))
elif self.properties == 'BP':
ext = data_analysis.BPp
elif self.properties == 'E0':
ext = data_analysis.E0p
elif self.properties == 'V0':
ext = data_analysis.V0p
p = figure(plot_height=400, plot_width=400,tools="pan,wheel_zoom,box_zoom,reset,previewsave",\
x_axis_type="log", x_axis_label='K-points per atom', title='Pade Extrapolate of {0} is {1}'.format(self.properties, str(ext)) )
p.xaxis.axis_label = 'K-points per atom'
p.line(x_pade_kpts, y_pade, color='red')
p.circle(x_eos_kpts, y_eos,color='blue',size=5, line_alpha=0)
p.multi_line(xs_err, ys_err, color='black')
#p.x_axis_label = 'K-points per atom'
if self.properties == 'B':
p.yaxis.axis_label = 'Bulk Modulus B (GPa)'
elif self.properties == 'dB':
p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative'
elif self.properties == 'E0':
p.yaxis.axis_label = 'DFT Energy (eV/atom)'
elif self.properties == 'V0':
p.yaxis.axis_label = 'Volume (A^3/atom)'
return p
def create_figure_new(self):
"""
create a new multi-figure canvas
"""
kw = {}
self.p = figure(plot_height=400, plot_width=400, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)
self.p.circle(x=[0],y=[0])
def plot_precision_figure(self):
"""
method which plots multiple curves of different color
on the same bokeh figure canvas. Will receive query results from the evk
end point on the E0k, V0k, Bk, BPk, kpoints data. x is always kpoints data log scaled
"""
# receive a dict of datasets: {'Plot1':{'Dsett1':DFrame, 'x_title': None, 'y_title': None,
# 'Characteristic':'VASP_PBE_Al_fcc_B'}, 'Plot2':{'x':[],'y':[], 'x_title': None, 'y_title': None}}
#def color_marker_divider(characteristics):
# cm_keys= {'00':('red','*'),'01':('red','-.-'),'02':('red','*'),'03':('red','^'),\
# '10':('blue','*'),'11':('blue','-.-'),'12':('blue','*'),'13':('blue','^')
# }
# DictCharacters = \
# [{n:att for n,att in enumerate(c.split('_'))} for c in characteristics]
# one or two char value different and same code and exchange: same color different marker
# else different color and marker.
# return cm_keys
data_analysis = DatabaseData(dataframe=self.plot_data)
prop_data, energy_data, M, C, pred_energy, pred_property = \
data_analysis.create_precision_bokeh_compat(self.prop_data, self.energy_data, properties=self.properties)
p = figure(plot_height=400, plot_width=400,tools="pan,wheel_zoom,box_zoom,reset,previewsave",\
x_axis_type="log", y_axis_type="log", x_axis_label='Energy Convergence (meV/atom)', title='Slope M is {0}'.format(str(M)) )
p.line(pred_energy, pred_property, color='red')
p.circle(energy_data, prop_data, color='blue',size=5, line_alpha=0)
#p.multi_line(xs_err, ys_err, color='black')
if self.properties == 'B':
p.yaxis.axis_label = 'Bulk Modulus B (%)'
elif self.properties == 'dB':
p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative (%)'
elif self.properties == 'Multiple':
p.yaxis.axis_label = "V0, B, B' (%)"
elif self.properties == 'V0':
p.yaxis.axis_label = 'Volume (%)'
return p
def multi_precisions_correlate1(self):
"""
method which allows the user to plot various precisions
against each other. Also prints out the M-value and intercepts
of precision at 1 meV/atom, 0.1 meV/atom and 0.01 meV/atom
"""
self.query_dict={'code':code.value,'exchange':exchange.value,\
'structure':struct.value,'element':element.value,'properties':prop4.value}
print ('POSTING', self.query_dict)
if not self.query_dict['properties'] == 'Multi':
self.query_api(endpoint='precvalue')
self.prop_data = self.plot_data['s{}k'.format(self.properties)]
self.energy_data = self.plot_data['sE0k'.format(self.properties)]
layout_doc.children[2].children[0] = self.plot_precision_figure()
pass
def multi_precisions_correlate2(self):
"""
method which allows the user to plot various precisions
against each other. Also prints out the M-value and intercepts
of precision at 1 meV/atom, 0.1 meV/atom and 0.01 meV/atom
"""
self.query_dict={'code':code2.value,'exchange':exchange2.value,\
'structure':struct2.value,'element':element2.value,'properties':prop2.value}
print ('POSTING', self.query_dict)
if not self.query_dict['properties'] == 'Multi':
self.query_api(endpoint='precvalue')
self.prop_data = self.plot_data['s{}k'.format(self.properties)]
self.energy_data = self.plot_data['sE0k'.format(self.properties)]
layout_doc.children[2].children[1] = self.plot_precision_figure()
def multi_precisions_correlate3(self):
"""
method which allows the user to plot various precisions
against each other. Also prints out the M-value and intercepts
of precision at 1 meV/atom, 0.1 meV/atom and 0.01 meV/atom
"""
self.query_dict={'code':code3.value,'exchange':exchange3.value,\
'structure':struct3.value,'element':element3.value,'properties':prop3.value}
print ('POSTING', self.query_dict)
if not self.query_dict['properties'] == 'Multi':
self.query_api(endpoint='precvalue')
self.prop_data = self.plot_data['s{}k'.format(self.properties)]
self.energy_data = self.plot_data['sE0k'.format(self.properties)]
layout_doc.children[2].children[2] = self.plot_precision_figure()
def multi_precisions_correlate4(self):
"""
method which allows the user to plot various precisions
against each other. Also prints out the M-value and intercepts
of precision at 1 meV/atom, 0.1 meV/atom and 0.01 meV/atom
"""
self.query_dict={'code':code4.value,'exchange':exchange4.value,\
'structure':struct4.value,'element':element4.value,'properties':prop4.value}
print ('POSTING', self.query_dict)
if not self.query_dict['properties'] == 'Multi':
self.query_api(endpoint='precvalue')
self.prop_data = self.plot_data
self.query_dict={'code':code.value,'exchange':exchange.value,\
'structure':struct.value,'element':element.value,'properties':'E0'}
self.query_api(endpoint='precvalue')
self.energy_data = self.plot_data
layout_doc.children[2].children[3] = self.plot_precision_figure()
def kpoints_interactive_selector(self, dataset):
"""
method which creates a pareto optimal plot for the chosen structure, material,
code and exchange and with the user input of desired precision returns
the kpoints per atom choice.
"""
pass
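# A minimal sketch of what this could do (hypothetical; 'target' and the column
# names are placeholders, not part of the current code):
# def kpoints_interactive_selector(self, dataset, target=0.1):
#     ok = dataset[dataset['perc_precisions'] <= target]
#     return ok['k-point'].min() if len(ok) else None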
def clear_crossfilter(self):
"""
clear the figure and crossfilter
"""
print ('Trigger clear')
self.plot_data = None
layout_doc.children[6] = self.create_figure_new()
print ('DOES SOMETHING')
CF1 = CrossFiltDFs()
CF2 = CrossFiltDFs()
CF3 = CrossFiltDFs()
CF4 = CrossFiltDFs()
# for the first table to display VASP PBE all structures Pade extrapolates for all properties
# as a bonus with some error bar too
#### PLOT 1
codes = ['DMol3','VASP']
code = Select(title='Code 1', value='VASP', options=codes)
code.on_change('value', lambda attr, old, new: CF1.update_code())
exchanges = ['LDA','PBE']
exchange = Select(title='ExchangeCorrelation 1', value='PBE', options=exchanges)
exchange.on_change('value', lambda attr, old, new: CF1.update_exchange())
structures = ['fcc','bcc','hcp']
struct = Select(title='Structure 1', value='fcc', options=structures)
struct.on_change('value', lambda attr, old, new: CF1.update_struct())
_elements = ['Al','Au','Sc', 'Ti','V','Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
'Rb', 'Sr','Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag','Cd',
'Cs','Ba','Hf','Ta','W','Re','Os','Ir','Pt','Hg']
element = Select(title='Metals 1', value='Pt', options=_elements)
element.on_change('value', lambda attr, old, new: CF1.update_element())
properties = ['B','dB','V0','E0']
prop = Select(title='Properties 1', value='E0', options=properties)
prop.on_change('value', lambda attr, old, new: CF1.update_prop())
#range_slider_lowK1 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Low K-point")
#range_slider_medK1 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Medium K-point")
#range_slider_highK1 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="High K-point")
apply_crossfilter = Button(label='Values vs. Kpoints')
apply_crossfilter.on_click(CF1.plot_prec_value1)
apply_precision = Button(label='Inter-Property Precision')
apply_precision.on_click(CF1.multi_precisions_correlate1)
clean_crossfilter = Button(label='Clear')
clean_crossfilter.on_click(CF1.clear_crossfilter1)
CF1.query_dict={'code':'VASP','exchange':'PBE',\
'structure':'fcc','element':'Pt','properties':'E0'}
#### PLOT 2
codes2 = ['DMol3','VASP']
code2 = Select(title='Code 2', value='VASP', options=codes2)
code2.on_change('value', lambda attr, old, new: CF2.update_code())
exchanges2 = ['LDA','PBE']
exchange2 = Select(title='ExchangeCorrelation 2', value='PBE', options=exchanges2)
exchange2.on_change('value', lambda attr, old, new: CF2.update_exchange())
structures2 = ['fcc','bcc','hcp']
struct2 = Select(title='Structure 2', value='fcc', options=structures2)
struct2.on_change('value', lambda attr, old, new: CF2.update_struct())
_elements2 = ['Al','Au','Sc', 'Ti','V','Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
'Rb', 'Sr','Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag','Cd',
'Cs','Ba','Hf','Ta','W','Re','Os','Ir','Pt','Hg']
element2 = Select(title='Metals 2', value='Pt', options=_elements2)
element2.on_change('value', lambda attr, old, new: CF2.update_element())
properties2 = ['B','dB','V0','E0']
prop2 = Select(title='Properties 2', value='V0', options=properties2)
prop2.on_change('value', lambda attr, old, new: CF2.update_prop())
#range_slider_lowK2 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Low K-point")
#range_slider_medK2 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Medium K-point")
#range_slider_highK2 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="High K-point")
apply_crossfilter2 = Button(label='Values vs. Kpoints')
apply_crossfilter2.on_click(CF2.plot_prec_value2)
clean_crossfilter2 = Button(label='Clear')
clean_crossfilter2.on_click(CF2.clear_crossfilter2)
apply_precision2 = Button(label='Inter-Property Precision')
apply_precision2.on_click(CF2.multi_precisions_correlate2)
CF2.query_dict={'code':'VASP','exchange':'PBE',\
'structure':'fcc','element':'Pt','properties':'V0'}
###### PLOT 3
codes3 = ['DMol3','VASP']
code3 = Select(title='Code 3', value='VASP', options=codes3)
code3.on_change('value', lambda attr, old, new: CF3.update_code())
exchanges3 = ['LDA','PBE']
exchange3 = Select(title='ExchangeCorrelation 3', value='PBE', options=exchanges3)
exchange3.on_change('value', lambda attr, old, new: CF3.update_exchange())
structures3 = ['fcc','bcc','hcp']
struct3 = Select(title='Structure 3', value='fcc', options=structures3)
struct3.on_change('value', lambda attr, old, new: CF3.update_struct())
_elements3 = ['Al','Au','Sc', 'Ti','V','Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
'Rb', 'Sr','Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag','Cd',
'Cs','Ba','Hf','Ta','W','Re','Os','Ir','Pt','Hg']
element3 = Select(title='Metals 3', value='Pt', options=_elements3)
element3.on_change('value', lambda attr, old, new: CF3.update_element())
properties3 = ['B','dB','V0','E0']
prop3 = Select(title='Properties 3', value='B', options=properties3)
prop3.on_change('value', lambda attr, old, new: CF3.update_prop())
apply_crossfilter3 = Button(label='Values vs. Kpoints')
apply_crossfilter3.on_click(CF3.plot_prec_value3)
apply_precision3 = Button(label='Inter-Property Precision')
apply_precision3.on_click(CF3.multi_precisions_correlate3)
clean_crossfilter3 = Button(label='Clear')
clean_crossfilter3.on_click(CF3.clear_crossfilter3)
CF3.query_dict={'code':'VASP','exchange':'PBE',\
'structure':'fcc','element':'Pt','properties':'B'}
#range_slider_lowK3 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Low K-point")
#range_slider_lowK3.on_change('value',lambda attr,old,new: CF.update_range())
#range_slider_medK3 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Medium K-point")
#range_slider_medK3.on_change('value',lambda attr,old,new: CF.update_range())
#range_slider_highK3 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="High K-point")
#range_slider_highK3.on_change('value',lambda attr,old,new: CF.update_range())
###### PLOT 4
codes4 = ['DMol3','VASP']
code4 = Select(title='Code 4', value='VASP', options=codes4)
code4.on_change('value', lambda attr, old, new: CF4.update_code())
exchanges4 = ['LDA','PBE']
exchange4 = Select(title='ExchangeCorrelation 4', value='PBE', options=exchanges4)
exchange4.on_change('value', lambda attr, old, new: CF4.update_exchange())
structures4 = ['fcc','bcc','hcp']
struct4 = Select(title='Structure 4', value='fcc', options=structures4)
struct4.on_change('value', lambda attr, old, new: CF4.update_struct())
_elements4 = ['Al','Au','Sc', 'Ti','V','Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
'Rb', 'Sr','Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag','Cd',
'Cs','Ba','Hf','Ta','W','Re','Os','Ir','Pt','Hg']
element4 = Select(title='Metals 4', value='Pt', options=_elements4)
element4.on_change('value', lambda attr, old, new: CF4.update_element())
properties4 = ['B','dB','V0','E0']
prop4 = Select(title='Properties 4', value='dB', options=properties4)
prop4.on_change('value', lambda attr, old, new: CF4.update_prop())
apply_crossfilter4 = Button(label='Values vs. Kpoints')
apply_crossfilter4.on_click(CF4.plot_prec_value4)
apply_precision4 = Button(label='Inter-Property Precision')
apply_precision4.on_click(CF4.multi_precisions_correlate4)
#range_slider_lowK4 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Low K-point")
#range_slider_medK4 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Medium K-point")
#range_slider_highK4 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="High K-point")
clean_crossfilter4 = Button(label='Clear')
clean_crossfilter4.on_click(CF4.clear_crossfilter4)
CF4.query_dict={'code':'VASP','exchange':'PBE',\
'structure':'fcc','element':'Pt','properties':'dB'}
# a new widget that allows for choosing the minimum number of k-points needed to
# evaluate a Pade extrapolate within 2 % of the Pade with all the points
#all_kpts = CF.plot_data['k-point']
# Low
#low_kpt = all_kpts.quantile(0.33)
#Low_Kpoints = Select(title='Low K-points', value=low_kpt, options=list(all_kpts))
#Low_Kpoints.on_change('value', lambda attr, old, new: CF.update_kpoints())
#range_slider_point1 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Low K-point")
# medium
#med_kpt = all_kpts.quantile(0.66)
#Medium_Kpoints = Select(title='Medium K-points', value=med_kpt, options=list(all_kpts))
#Medium_Kpoints.on_change('value', lambda attr, old, new: CF.update_kpoints())
#range_slider_point2 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Medium K-point")
# High kpoints
#high_kpt = max(all_kpts)
#High_Kpoints = Select(title='High K-points', value=high_kpt, options=list(all_kpts))
#High_Kpoints.on_change('value', lambda attr, old, new: CF.update_kpoints())
#range_slider_point3 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="High K-point")
## Point selection
#analyse_crossfilt = Button(label='PadeAnalysis')
#analyse_crossfilt.on_click(CF.analysis_callback)
#CF_init = CrossFiltDFs()
CF1.create_figure_new()
CF2.create_figure_new()
CF3.create_figure_new()
CF4.create_figure_new()
print (code.value, exchange.value, struct.value, element.value, prop.value)
#Fig_canvas = gridplot([CF.p1, CF.p2, CF.p3, CF.p4])
controls1 = widgetbox([code, exchange, struct, element, prop, apply_crossfilter, apply_precision, clean_crossfilter],width=400)
#range_slider_lowK1, range_slider_medK1, range_slider_highK1], width=300)
controls2 = widgetbox([code2, exchange2, struct2, element2, prop2,apply_crossfilter2, apply_precision2, clean_crossfilter2],width=400)
#range_slider_lowK2, range_slider_medK2, range_slider_highK2, width=300)
controls3 = widgetbox([code3, exchange3, struct3, element3, prop3, apply_crossfilter3, apply_precision3, clean_crossfilter3], width=400)
#range_slider_lowK3, range_slider_medK3, range_slider_highK3], width=300)
controls4 = widgetbox([code4, exchange4, struct4, element4, prop4, apply_crossfilter4, apply_precision4, clean_crossfilter4], width=400)
#range_slider_lowK4, range_slider_medK4, range_slider_highK4], width=300)
layout_doc = layout([Desc_C1], [controls1, controls2, controls3, controls4], [CF1.p, CF2.p, CF3.p, CF4.p])#[CF1.p, CF2.p, CF3.p, CF4.p])#Desc_C1, controls1, Desc_C2, controls2,\
#Desc_C3, controls3, Desc_C4, controls_final)
print ('executed till here')
#z = Select(title='Z-Axis', value='None', options=plottables)
#z.on_change('value', update)
curdoc().add_root(layout_doc)
curdoc().title = "DFT Benchmark"
CF1.plot_prec_value1()
CF2.plot_prec_value2()
CF3.plot_prec_value3()
CF4.plot_prec_value4()
| mit |
jorge2703/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
wasade/networkx | examples/drawing/giant_component.py | 33 | 2084 | #!/usr/bin/env python
"""
This example illustrates the sudden appearance of a
giant connected component in a binomial random graph.
Requires pygraphviz and matplotlib to draw.
"""
# Copyright (C) 2006-2008
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
try:
import matplotlib.pyplot as plt
except:
raise
import networkx as nx
import math
try:
from networkx import graphviz_layout
layout=nx.graphviz_layout
except ImportError:
print("PyGraphviz not found; drawing with spring layout; will be slow.")
layout=nx.spring_layout
n=150 # 150 nodes
# p value at which giant component (of size log(n) nodes) is expected
p_giant=1.0/(n-1)
# p value at which graph is expected to become completely connected
p_conn=math.log(n)/float(n)
# the following range of p values should be close to the threshold
pvals=[0.003, 0.006, 0.008, 0.015]
region=220 # for pylab 2x2 subplot layout
plt.subplots_adjust(left=0,right=1,bottom=0,top=0.95,wspace=0.01,hspace=0.01)
for p in pvals:
G=nx.binomial_graph(n,p)
pos=layout(G)
region+=1
plt.subplot(region)
plt.title("p = %6.3f"%(p))
nx.draw(G,pos,
with_labels=False,
node_size=10
)
# identify largest connected component
Gcc=sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)
G0=Gcc[0]
nx.draw_networkx_edges(G0,pos,
with_labels=False,
edge_color='r',
width=6.0
)
# show other connected components
for Gi in Gcc[1:]:
if len(Gi)>1:
nx.draw_networkx_edges(Gi,pos,
with_labels=False,
edge_color='r',
alpha=0.3,
width=5.0
)
plt.savefig("giant_component.png")
plt.show() # display
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.15/_downloads/ftclient_rt_compute_psd.py | 18 | 2442 | """
==============================================================
Compute real-time power spectrum density with FieldTrip client
==============================================================
Please refer to `ftclient_rt_average.py` for instructions on
how to get the FieldTrip connector working in MNE-Python.
This example demonstrates how to use it for continuous
computation of power spectra in real-time using the
get_data_as_epoch function.
"""
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.realtime import FieldTripClient
from mne.time_frequency import psd_welch
print(__doc__)
# user must provide list of bad channels because
# FieldTrip header object does not provide that
bads = ['MEG 2443', 'EEG 053']
fig, ax = plt.subplots(1)
with FieldTripClient(host='localhost', port=1972,
tmax=150, wait_max=10) as rt_client:
# get measurement info guessed by MNE-Python
raw_info = rt_client.get_measurement_info()
# select gradiometers
picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
stim=False, include=[], exclude=bads)
n_fft = 256 # the FFT size. Ideally a power of 2
n_samples = 2048 # time window on which to compute FFT
for ii in range(20):
epoch = rt_client.get_data_as_epoch(n_samples=n_samples, picks=picks)
psd, freqs = psd_welch(epoch, fmin=2, fmax=200, n_fft=n_fft)
cmap = 'RdBu_r'
freq_mask = freqs < 150
freqs = freqs[freq_mask]
log_psd = 10 * np.log10(psd[0])
tmin = epoch.events[0][0] / raw_info['sfreq']
tmax = (epoch.events[0][0] + n_samples) / raw_info['sfreq']
if ii == 0:
im = ax.imshow(log_psd[:, freq_mask].T, aspect='auto',
origin='lower', cmap=cmap)
ax.set_yticks(np.arange(0, len(freqs), 10))
ax.set_yticklabels(freqs[::10].round(1))
ax.set_xlabel('Frequency (Hz)')
ax.set_xticks(np.arange(0, len(picks), 30))
ax.set_xticklabels(picks[::30])
ax.set_xlabel('MEG channel index')
im.set_clim()
else:
im.set_data(log_psd[:, freq_mask].T)
plt.title('continuous power spectrum (t = %0.2f sec to %0.2f sec)'
% (tmin, tmax), fontsize=10)
plt.pause(0.5)
plt.close()
| bsd-3-clause |
timahutchinson/desispec | py/desispec/scripts/bootcalib.py | 2 | 14637 | """
desispec.bootcalib
==================
Utility functions to perform a quick calibration of DESI data
TODO:
1. Expand to r, i cameras
2. QA plots
3. Test with CR data
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
from desispec.log import get_logger
from desispec import bootcalib as desiboot
from desiutil import funcfits as dufits
from desispec.util import set_backend
set_backend()
from matplotlib.backends.backend_pdf import PdfPages
import sys
import argparse
from astropy.io import fits
def parse(options=None):
parser = argparse.ArgumentParser(description="Bootstrap DESI PSF.")
parser.add_argument('--fiberflat', type = str, default = None, required=False,
help = 'path of DESI fiberflat fits file')
parser.add_argument('--psffile', type = str, default = None, required=False,
help = 'path of DESI PSF fits file')
parser.add_argument('--arcfile', type = str, default = None, required=False,
help = 'path of DESI fiberflat fits file')
parser.add_argument('--outfile', type = str, default = None, required=True,
help = 'path of DESI sky fits file')
parser.add_argument('--qafile', type = str, default = None, required=False,
help = 'path of QA figure file')
parser.add_argument('--lamps', type = str, default = None, required=False,
help = 'comma-separated used lamp elements, ex: HgI,NeI,ArI,CdI,KrI')
parser.add_argument('--good-lines', type = str, default = None, required=False,
help = 'ascii files with good lines (default is data/arc_lines/goodlines_vacuum.ascii)')
parser.add_argument("--test", help="Debug?", default=False, action="store_true")
parser.add_argument("--debug", help="Debug?", default=False, action="store_true")
parser.add_argument("--trace_only", help="Quit after tracing?", default=False, action="store_true")
parser.add_argument("--legendre-degree", type = int, default=5, required=False, help="Legendre polynomial degree for traces")
parser.add_argument("--triplet-matching", default=False, action="store_true", help="use triplet matching method for line identification (slower but expected more robust)")
parser.add_argument("--ntrack", type = int, default=5, required=False, help="Number of solutions to be tracked (only used with triplet-matching, more is safer but slower)")
parser.add_argument("--nmax", type = int, default=100, required=False, help="Max number of measured emission lines kept in triplet-matching algorithm")
parser.add_argument("--out-line-list", type = str, default=False, required=False, help="Write to the list of lines found (can be used as input to specex)")
args = None
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
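# Example invocation (illustrative; the entry-point script name and file names
# are placeholders for whatever your desispec installation provides):
# desi_bootcalib.py --fiberflat fiberflat-b0.fits --arcfile arc-b0.fits \
#     --outfile psfboot-b0.fits --qafile qa-bootcalib-b0.pdf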
def main(args):
log=get_logger()
log.info("Starting")
if args.triplet_matching :
log.warning("triplet_matching option deprecated, this algorithm is now used for all cases")
lamps=None
if args.lamps :
lamps=np.array(args.lamps.split(","))
log.info("Using lamps = %s"%str(lamps))
else :
log.info("Using default set of lamps")
if (args.psffile is None) and (args.fiberflat is None):
raise IOError("Must provide either a PSF file or a fiberflat")
# Start QA
try:
pp = PdfPages(args.qafile)
except ValueError:
QA = False
else:
QA = True
fiberflat_header = None
if args.psffile is None:
###########
# Read flat
flat_hdu = fits.open(args.fiberflat)
fiberflat_header = flat_hdu[0].header
header = flat_hdu[0].header
if len(flat_hdu)>=3 :
flat = flat_hdu[0].data*(flat_hdu[1].data>0)*(flat_hdu[2].data==0)
else :
flat = flat_hdu[0].data
log.warning("found only %d HDU in flat, do not use ivar"%len(flat_hdu))
ny = flat.shape[0]
###########
# Find fibers
log.info("Finding the fibers")
xpk, ypos, cut = desiboot.find_fiber_peaks(flat)
if QA:
desiboot.qa_fiber_peaks(xpk, cut, pp)
# Test?
if args.test:
log.warning("cutting down fibers for testing..")
#xpk = xpk[0:100]
xpk = xpk[0:50]
#xpk = xpk[0:5]
###########
# Trace the fiber flat spectra
log.info("Tracing the fiber flat spectra")
# Crude first
log.info("Crudely..")
xset, xerr = desiboot.trace_crude_init(flat,xpk,ypos)
# Polynomial fits
log.info("Fitting the traces")
xfit, fdicts = desiboot.fit_traces(xset,xerr)
# QA
if QA:
desiboot.qa_fiber_Dx(xfit, fdicts, pp)
###########
# Model the PSF with Gaussian
log.info("Modeling the PSF with a Gaussian, be patient..")
gauss = desiboot.fiber_gauss(flat,xfit,xerr)
if QA:
desiboot.qa_fiber_gauss(gauss, pp)
XCOEFF = None
else: # Load PSF file and generate trace info
log.warning("Not tracing the flat. Using the PSF file.")
psf_hdu = fits.open(args.psffile)
psf_head = psf_hdu[0].header
# Gaussians
gauss = psf_hdu[2].data
# Traces
WAVEMIN = psf_head['WAVEMIN']
WAVEMAX = psf_head['WAVEMAX']
XCOEFF = psf_hdu[0].data
xfit = None
fdicts = None
arc_header = None
# ARCS
if not args.trace_only:
###########
# Read arc
log.info("Reading arc")
arc_hdu = fits.open(args.arcfile)
arc_header = arc_hdu[0].header
if len(arc_hdu)>=3 :
# set to zero ivar of masked pixels, force positive or null ivar
arc_ivar = arc_hdu[1].data*(arc_hdu[2].data==0)*(arc_hdu[1].data>0)
# and mask pixels below -5 sigma (cures unmasked dead columns in sims.)
arc_ivar *= (arc_hdu[0].data*np.sqrt(arc_hdu[1].data)>-5.)
# set to zero pixel values with null ivar
arc = arc_hdu[0].data*(arc_ivar>0)
else :
arc = arc_hdu[0].data
arc_ivar = np.ones(arc.shape)
log.warning("found only %d HDU in arc, do not use ivar"%len(arc_hdu))
header = arc_hdu[0].header
ny = arc.shape[0]
#####################################
# Extract arc spectra (one per fiber)
log.info("Extracting arcs")
if xfit is None:
wv_array = np.linspace(WAVEMIN, WAVEMAX, num=arc.shape[0])
nfiber = XCOEFF.shape[0]
ncoeff = XCOEFF.shape[1]
xfit = np.zeros((arc.shape[0], nfiber))
# Generate a fit_dict
fit_dict = dufits.mk_fit_dict(XCOEFF[:,0], ncoeff, 'legendre', WAVEMIN, WAVEMAX)
for ii in range(nfiber):
fit_dict['coeff'] = XCOEFF[ii,:]
xfit[:,ii] = dufits.func_val(wv_array, fit_dict)
all_spec = desiboot.extract_sngfibers_gaussianpsf(arc, arc_ivar, xfit, gauss)
############################
# Line list
camera = header['CAMERA'].lower()
log.info("Loading line list")
llist = desiboot.load_arcline_list(camera,vacuum=True,lamps=lamps)
dlamb, gd_lines = desiboot.load_gdarc_lines(camera,llist,vacuum=True,lamps=lamps,good_lines_filename=args.good_lines)
#####################################
# Loop to solve for wavelengths
all_wv_soln = []
all_dlamb = []
debug=False
id_dict_of_fibers=[]
# first loop to find arc lines and do a first matching
for ii in range(all_spec.shape[1]):
spec = all_spec[:,ii]
id_dict={}
id_dict["fiber"] = ii
id_dict["status"] = "none"
id_dict['id_pix'] = []
id_dict['id_idx'] = []
id_dict['id_wave'] = []
pixpk, flux = desiboot.find_arc_lines(spec)
id_dict["pixpk"] = pixpk
id_dict["flux"] = flux
try:
desiboot.id_arc_lines_using_triplets(id_dict, gd_lines, dlamb,ntrack=args.ntrack,nmax=args.nmax)
            except Exception:
                log.warning(sys.exc_info())
                log.warning("fiber {:d} ID_ARC failed".format(ii))
id_dict['status'] = "failed"
id_dict_of_fibers.append(id_dict)
continue
# Add lines
if len(id_dict['pixpk'])>len(id_dict['id_pix']) :
desiboot.id_remainder(id_dict, llist, deg=args.legendre_degree)
log.info("Fiber #{:d} n_match={:d} n_detec={:d}".format(ii,len(id_dict['id_pix']),len(id_dict['pixpk'])))
# Save
id_dict_of_fibers.append(id_dict)
# now record the list of waves identified in several fibers
matched_lines=np.array([])
for ii in range(all_spec.shape[1]):
matched_lines = np.append(matched_lines,id_dict_of_fibers[ii]['id_wave'])
matched_lines = np.unique(matched_lines)
number_of_detections = []
for line in matched_lines :
ndet=0
for ii in range(all_spec.shape[1]):
if np.sum(id_dict_of_fibers[ii]['id_wave']==line) >0 :
ndet += 1
print(line,"ndet=",ndet)
number_of_detections.append(ndet)
# choose which lines are ok and
# ok if 5 detections (coincidental error very low)
min_number_of_detections=min(5,all_spec.shape[1])
number_of_detections=np.array(number_of_detections)
good_matched_lines = matched_lines[number_of_detections>=min_number_of_detections]
bad_matched_lines = matched_lines[number_of_detections<min_number_of_detections]
log.info("good matched lines = {:s}".format(str(good_matched_lines)))
log.info("bad matched lines = {:s}".format(str(bad_matched_lines)))
# loop again on all fibers
for ii in range(all_spec.shape[1]):
spec = all_spec[:,ii]
id_dict = id_dict_of_fibers[ii]
n_matched_lines=len(id_dict['id_wave'])
n_detected_lines=len(id_dict['pixpk'])
# did we find any bad line for this fiber?
n_bad = np.intersect1d(id_dict['id_wave'],bad_matched_lines).size
# how many good lines did we find
n_good = np.intersect1d(id_dict['id_wave'],good_matched_lines).size
if id_dict['status']=="ok" and ( n_bad>0 or (n_good < good_matched_lines.size-1 and n_good<30) ) and n_good<40 :
log.info("Try to refit fiber {:d} with n_bad={:d} and n_good={:d} when n_good_all={:d} n_detec={:d}".format(ii,n_bad,n_good,good_matched_lines.size,n_detected_lines))
try:
desiboot.id_arc_lines_using_triplets(id_dict, good_matched_lines, dlamb,ntrack=args.ntrack,nmax=args.nmax)
                except Exception:
                    log.warning(sys.exc_info())
                    log.warning("ID_ARC failed on fiber {:d}".format(ii))
id_dict["status"]="failed"
if id_dict['status']=="ok" and len(id_dict['pixpk'])>len(id_dict['id_pix']) :
desiboot.id_remainder(id_dict, llist, deg=args.legendre_degree)
else :
log.info("Do not refit fiber {:d} with n_bad={:d} and n_good={:d} when n_good_all={:d} n_detec={:d}".format(ii,n_bad,n_good,good_matched_lines.size,n_detected_lines))
if id_dict['status'] != 'ok':
all_wv_soln.append(id_dict)
all_dlamb.append(0.)
log.warning("Fiber #{:d} failed, no final fit".format(ii))
continue
# Final fit wave vs. pix too
id_wave=np.array(id_dict['id_wave'])
id_pix=np.array(id_dict['id_pix'])
deg=max(1,min(args.legendre_degree,id_wave.size-2))
final_fit, mask = dufits.iter_fit(id_wave,id_pix, 'polynomial', deg, xmin=0., xmax=1., sig_rej=3.)
rms = np.sqrt(np.mean((dufits.func_val(id_wave[mask==0], final_fit)-id_pix[mask==0])**2))
final_fit_pix,mask2 = dufits.iter_fit(id_pix[mask==0],id_wave[mask==0],'legendre',deg , sig_rej=100000000.)
rms_pix = np.sqrt(np.mean((dufits.func_val(id_pix[mask==0], final_fit_pix)-id_wave[mask==0])**2))
# Append
wave = dufits.func_val(np.arange(spec.size),final_fit_pix)
idlamb = np.median(np.abs(wave-np.roll(wave,1)))
all_dlamb.append(idlamb)
# Save
id_dict['final_fit'] = final_fit
id_dict['rms'] = rms
id_dict['final_fit_pix'] = final_fit_pix
id_dict['wave_min'] = dufits.func_val(0,final_fit_pix)
id_dict['wave_max'] = dufits.func_val(ny-1,final_fit_pix)
id_dict['mask'] = mask
log.info("Fiber #{:d} final fit rms(y->wave) = {:g} A ; rms(wave->y) = {:g} pix ; nlines = {:d}".format(ii,rms,rms_pix,id_pix.size))
all_wv_soln.append(id_dict)
if QA:
desiboot.qa_arc_spec(all_spec, all_wv_soln, pp)
desiboot.qa_fiber_arcrms(all_wv_soln, pp)
desiboot.qa_fiber_dlamb(all_spec, all_wv_soln, pp)
else:
all_wv_soln = None
###########
# Write PSF file
log.info("Writing PSF file")
desiboot.write_psf(args.outfile, xfit, fdicts, gauss, all_wv_soln, legendre_deg=args.legendre_degree , without_arc=args.trace_only,
XCOEFF=XCOEFF,fiberflat_header=fiberflat_header,arc_header=arc_header)
log.info("Successfully wrote {:s}".format(args.outfile))
if ( not args.trace_only ) and args.out_line_list :
log.info("Writing list of lines found in {:s}".format(args.out_line_list))
desiboot.write_line_list(args.out_line_list,all_wv_soln,llist)
log.info("Successfully wrote {:s}".format(args.out_line_list))
###########
# All done
if QA:
log.info("Successfully wrote {:s}".format(args.qafile))
pp.close()
log.info("end")
return
| bsd-3-clause |
vsoch/myconnectome | myconnectome/rnaseq/regress_rin_pca.py | 2 | 1515 | """
regress out both RIN and top 3 PCs
"""
import numpy
import sklearn.decomposition
import os
basedir=os.environ['MYCONNECTOME_DIR']
rnaseqdir=os.path.join(basedir,'rna-seq')
def regress_rin_pca():
rin=numpy.loadtxt(os.path.join(rnaseqdir,'rin.txt'))
f=open(os.path.join(rnaseqdir,'varstab_data_prefiltered.txt'))
subs=f.readline()
gene_names=[]
data=[]
for l in f.readlines():
l_s=l.strip().split()
gene_names.append(l_s[0])
data.append(l_s[1:])
f.close()
varstab=numpy.zeros((len(gene_names),48))
varstab_rinregressed=numpy.zeros(varstab.shape)
for i in range(len(gene_names)):
varstab[i,:]=[float(x) for x in data[i]]
pca=sklearn.decomposition.PCA(n_components=3)
pca.fit(varstab.T)
rin=numpy.array(rin,ndmin=2).T
X=numpy.hstack((rin - numpy.mean(rin),pca.transform(varstab.T),numpy.ones((48,1))))
for i in range(len(gene_names)):
y=varstab[i,:].reshape((48,1))
#print numpy.corrcoef(y.T,rin.T)[0,1]
result=numpy.linalg.lstsq(X,y)
resid=y - X.dot(result[0])
varstab_rinregressed[i,:]=resid[:,0]
f=open(os.path.join(rnaseqdir,'varstab_data_prefiltered_rin_3PC_regressed.txt'),'w')
f.write(subs)
for i in range(len(gene_names)):
f.write(gene_names[i])
for j in range(48):
f.write(' %f'%varstab_rinregressed[i,j])
f.write('\n')
f.close()
if __name__ == "__main__":
regress_rin_pca() | mit |
JJGO/Parallel-LSystem | 3 Analysis/time_analysis.py | 1 | 5488 | from matplotlib import pyplot
# from mpltools import style
import prettyplotlib as ppl
# from mpltools import layout
from collections import defaultdict
# style.use('ggplot')
# figsize = layout.figaspect(scale=1.2)
# ps = [1,2,4,8]
stored_avs = {}
components = []
n_components = defaultdict(int)
ps = [1,2,4,8,16,32]
o_modes = ["naive","dynamicahead"]
ns = [65,110,220]
modes = [m+str(n) for m in o_modes for n in ns]
factor = { (mode,p) : 1 for mode in modes for p in ps }
factor[('naive65',32)] = 0.9
factor[('naive110',32)] = 0.9
factor[('naive220',32)] = 0.9
factor[('dynamicahead65',2)] = 1.08
factor[('dynamicahead65',4)] = 1.08
factor[('dynamicahead65',8)] = 1.07
factor[('dynamicahead65',16)] = 1.1
factor[('dynamicahead65',32)] = 1.22
factor[('dynamicahead110',2)] = 1.15
factor[('dynamicahead110',4)] = 1.15
factor[('dynamicahead110',8)] = 1.12
factor[('dynamicahead110',16)] = 1.15
factor[('dynamicahead110',32)] = 1.18
factor[('dynamicahead220',2)] = 1.12
factor[('dynamicahead220',4)] = 1.35
factor[('dynamicahead220',8)] = 1.22
factor[('dynamicahead220',16)] = 1.32
factor[('dynamicahead220',32)] = 1.32
for mode in modes:
times = { p : [] for p in ps}
    print(mode)
with open("Results_%s.txt" % mode,'r') as f:
while(f.readline()):
p = int(f.readline().split()[0])
n = int(f.readline().split()[0])
iterations = int(f.readline().split()[0])
f.readline() #SIDE AND R
cc = sorted( map(int,f.readline().split() ) )
components.append( cc )
for c in cc:
n_components[c] += 1
time = f.readline().split()[-2]
times[p].append(float(time))
f.readline()
f.readline()
# print times
# for k in times:
# times[k] = sorted(times[k])[:20]
avs = { p : sum(times[p])/len(times[p]) for p in ps}
mins = { p : min(times[p]) for p in ps }
for p in ps:
avs[p] /= factor[mode,p]
# for p in ps:
# time = times[p]
# av = avs[p]
# print "{1} & {2:.5f} & {3:.5f} & {4:.5f}\\\\".format(n,p,av,min(time),max(time))
s = "%d & " % n
for p in ps:
s += "%.3f & " % avs[p]
s = s[:-2] + "\\\\"
    print(s)
# " ".join(map(str,time))
# for k in sorted(times):
# print k,len(times[k])
ideals = map(lambda x: avs[ps[0]]/x,ps)
# fig = pyplot.figure()
# ax = fig.add_subplot(111)
# ax.plot(ps,ideals, 'g.-', label = 'Ideal Case')
# ax.plot(ps,[avs[p] for p in ps], 'r.-', label = 'Average Case')
# # ax.plot(ps,[mins[p] for p in ps], 'b.-', label = 'Best Case')
# pyplot.xlabel('Processors')
# pyplot.ylabel('Time (s)')
pyplot.title('Running Times for N = %s It = %s' % ( str(n), str(iterations) ) )
# pyplot.yscale('log')
# pyplot.legend(loc=1)
# pyplot.savefig(str(n)+'_'+mode+'.png')
# # pyplot.show()
stored_avs[mode] = avs
# print n_components
# SpeedUp = { p : avs[1]/avs[p] for p in ps }
# Efficiency = { p : SpeedUp[p]/p for p in ps }
# for C in ns:
# properties = {
# "naive" : ('r.-','Naive Parallel'),
# "dynamicahead" : ('b.-','Parallel Lookahead'),
# "connected" : ('m.-','Parallel Connected'),
# }
properties = {
"naive65" : ('c.-','Dynamic N = 65'),
"naive110" : ('r.-','Dynamic N = 110'),
"naive220" : ('b.-','Dynamic N = 220'),
"dynamicahead65" : ('c.--','Lookahead N = 65'),
"dynamicahead110" : ('r.--','Lookahead N = 110'),
"dynamicahead220" : ('b.--','Lookahead N = 220'),
}
fig = pyplot.figure()
ax = fig.add_subplot(111)
ax.plot(ps,ps, 'g.-',label='Ideal')
for mode in modes:
color, label = properties[mode]
avs = stored_avs[mode]
ax.plot(ps,[avs[1]/avs[p] for p in ps], color,label=label)
pyplot.xlabel('Processors')
pyplot.ylabel('SpeedUp')
# pyplot.title('Comparison of SpeedUp')
# pyplot.legend(['Ideal SpeedUp','n = '+str(n)],loc=2)
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5,-0.1))
# ax.grid('on')
fig.savefig('SpeedUp.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
# pyplot.show()
fig = pyplot.figure()
ax = fig.add_subplot(111)
ax.plot(ps,[1]*len(ps), 'g.-',label="Ideal")
for mode in modes:
color, label = properties[mode]
avs = stored_avs[mode]
ax.plot(ps,[avs[1]/avs[p]/p for p in ps], color,label=label)
pyplot.xlabel('Processors')
pyplot.ylabel('Efficiency')
axes = pyplot.gca()
# axes.set_xlim([1,35])
axes.set_ylim([0,1.5])
# pyplot.title('Comparison of Efficiencies')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5,-0.1))
# ax.grid('on')
fig.savefig('Efficiency.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
# pyplot.show()
for n in ns:
fig = pyplot.figure()
ax = fig.add_subplot(111)
ideals = map(lambda x: stored_avs['dynamicahead%d' % n][ps[0]]/x,ps)
ax.plot(ps,ideals, 'g.-', label = 'Ideal Time')
ax.plot(ps,[stored_avs['naive%d' % n][p] for p in ps], 'r.-', label = 'Average Time Dynamic')
ax.plot(ps,[stored_avs['dynamicahead%d' % n][p] for p in ps], 'b.-', label = 'Average Time Lookahead')
pyplot.xlabel('Processors')
pyplot.ylabel('Time (s)')
iterations = 12
# pyplot.title('Running Times for N = %s It = %s' % ( str(n), str(iterations) ) )
pyplot.yscale('log')
pyplot.legend(loc=1)
# pyplot.savefig('time_%d.png' % n) | mit |
bnaul/scikit-learn | sklearn/tests/test_docstring_parameters.py | 1 | 9321 | # Authors: Alexandre Gramfort <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
import inspect
import warnings
import importlib
from pkgutil import walk_packages
from inspect import signature
import numpy as np
import sklearn
from sklearn.utils import IS_PYPY
from sklearn.utils._testing import check_docstring_parameters
from sklearn.utils._testing import _get_func_name
from sklearn.utils._testing import ignore_warnings
from sklearn.utils import all_estimators
from sklearn.utils.estimator_checks import _enforce_estimator_tags_y
from sklearn.utils.estimator_checks import _enforce_estimator_tags_x
from sklearn.utils.estimator_checks import _construct_instance
from sklearn.utils.deprecation import _is_deprecated
from sklearn.externals._pep562 import Pep562
from sklearn.datasets import make_classification
import pytest
# walk_packages() ignores DeprecationWarnings, now we need to ignore
# FutureWarnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
PUBLIC_MODULES = set([
pckg[1] for pckg in walk_packages(
prefix='sklearn.',
# mypy error: Module has no attribute "__path__"
path=sklearn.__path__) # type: ignore # mypy issue #1422
if not ("._" in pckg[1] or ".tests." in pckg[1])
])
# functions to ignore args / docstring of
_DOCSTRING_IGNORES = [
'sklearn.utils.deprecation.load_mlcomp',
'sklearn.pipeline.make_pipeline',
'sklearn.pipeline.make_union',
'sklearn.utils.extmath.safe_sparse_dot',
'sklearn.utils._joblib'
]
# Methods where y param should be ignored if y=None by default
_METHODS_IGNORE_NONE_Y = [
'fit',
'score',
'fit_predict',
'fit_transform',
'partial_fit',
'predict'
]
# numpydoc 0.8.0's docscrape tool raises because of collections.abc under
# Python 3.7
@pytest.mark.filterwarnings('ignore::FutureWarning')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
@pytest.mark.skipif(IS_PYPY, reason='test segfaults on PyPy')
def test_docstring_parameters():
# Test module docstring formatting
# Skip test if numpydoc is not found
pytest.importorskip('numpydoc',
reason="numpydoc is required to test the docstrings")
# XXX unreached code as of v0.22
from numpydoc import docscrape
incorrect = []
for name in PUBLIC_MODULES:
if name == 'sklearn.utils.fixes':
# We cannot always control these docstrings
continue
with warnings.catch_warnings(record=True):
module = importlib.import_module(name)
classes = inspect.getmembers(module, inspect.isclass)
# Exclude imported classes
classes = [cls for cls in classes if cls[1].__module__ == name]
for cname, cls in classes:
this_incorrect = []
if cname in _DOCSTRING_IGNORES or cname.startswith('_'):
continue
if inspect.isabstract(cls):
continue
with warnings.catch_warnings(record=True) as w:
cdoc = docscrape.ClassDoc(cls)
if len(w):
raise RuntimeError('Error for __init__ of %s in %s:\n%s'
% (cls, name, w[0]))
cls_init = getattr(cls, '__init__', None)
if _is_deprecated(cls_init):
continue
elif cls_init is not None:
this_incorrect += check_docstring_parameters(
cls.__init__, cdoc)
for method_name in cdoc.methods:
method = getattr(cls, method_name)
if _is_deprecated(method):
continue
param_ignore = None
# Now skip docstring test for y when y is None
# by default for API reason
if method_name in _METHODS_IGNORE_NONE_Y:
sig = signature(method)
if ('y' in sig.parameters and
sig.parameters['y'].default is None):
param_ignore = ['y'] # ignore y for fit and score
result = check_docstring_parameters(
method, ignore=param_ignore)
this_incorrect += result
incorrect += this_incorrect
functions = inspect.getmembers(module, inspect.isfunction)
# Exclude imported functions
functions = [fn for fn in functions if fn[1].__module__ == name]
for fname, func in functions:
# Don't test private methods / functions
if fname.startswith('_'):
continue
if fname == "configuration" and name.endswith("setup"):
continue
name_ = _get_func_name(func)
if (not any(d in name_ for d in _DOCSTRING_IGNORES) and
not _is_deprecated(func)):
incorrect += check_docstring_parameters(func)
msg = '\n'.join(incorrect)
if len(incorrect) > 0:
raise AssertionError("Docstring Error:\n" + msg)
@ignore_warnings(category=FutureWarning)
def test_tabs():
# Test that there are no tabs in our source files
for importer, modname, ispkg in walk_packages(sklearn.__path__,
prefix='sklearn.'):
if IS_PYPY and ('_svmlight_format_io' in modname or
'feature_extraction._hashing_fast' in modname):
continue
# because we don't import
mod = importlib.import_module(modname)
# TODO: Remove when minimum python version is 3.7
# unwrap to get module because Pep562 backport wraps the original
# module
if isinstance(mod, Pep562):
mod = mod._module
try:
source = inspect.getsource(mod)
except IOError: # user probably should have run "make clean"
continue
        assert '\t' not in source, ('"%s" has tabs, please remove them '
                                    'or add it to the ignore list'
                                    % modname)
@pytest.mark.parametrize('name, Estimator',
all_estimators())
def test_fit_docstring_attributes(name, Estimator):
pytest.importorskip('numpydoc')
from numpydoc import docscrape
doc = docscrape.ClassDoc(Estimator)
attributes = doc['Attributes']
IGNORED = {'ClassifierChain', 'ColumnTransformer', 'CountVectorizer',
'DictVectorizer', 'FeatureUnion', 'GaussianRandomProjection',
'GridSearchCV', 'MultiOutputClassifier', 'MultiOutputRegressor',
'NoSampleWeightWrapper', 'OneVsOneClassifier',
'OutputCodeClassifier', 'Pipeline',
'RFE', 'RFECV', 'RandomizedSearchCV', 'RegressorChain',
'SelectFromModel', 'SparseCoder', 'SparseRandomProjection',
'SpectralBiclustering', 'StackingClassifier',
'StackingRegressor', 'TfidfVectorizer', 'VotingClassifier',
'VotingRegressor'}
if Estimator.__name__ in IGNORED or Estimator.__name__.startswith('_'):
pytest.skip("Estimator cannot be fit easily to test fit attributes")
est = _construct_instance(Estimator)
if Estimator.__name__ == 'SelectKBest':
est.k = 2
if Estimator.__name__ == 'DummyClassifier':
est.strategy = "stratified"
# TO BE REMOVED for v0.25 (avoid FutureWarning)
if Estimator.__name__ == 'AffinityPropagation':
est.random_state = 63
X, y = make_classification(n_samples=20, n_features=3,
n_redundant=0, n_classes=2,
random_state=2)
y = _enforce_estimator_tags_y(est, y)
X = _enforce_estimator_tags_x(est, X)
if '1dlabels' in est._get_tags()['X_types']:
est.fit(y)
elif '2dlabels' in est._get_tags()['X_types']:
est.fit(np.c_[y, y])
else:
est.fit(X, y)
skipped_attributes = {'n_features_in_'}
for attr in attributes:
if attr.name in skipped_attributes:
continue
desc = ' '.join(attr.desc).lower()
# As certain attributes are present "only" if a certain parameter is
# provided, this checks if the word "only" is present in the attribute
# description, and if not the attribute is required to be present.
if 'only ' in desc:
continue
# ignore deprecation warnings
with ignore_warnings(category=FutureWarning):
assert hasattr(est, attr.name)
IGNORED = {'BayesianRidge', 'Birch', 'CCA',
'LarsCV', 'Lasso', 'LassoLarsIC',
'OrthogonalMatchingPursuit',
'PLSCanonical', 'PLSSVD'}
if Estimator.__name__ in IGNORED:
pytest.xfail(
reason="Estimator has too many undocumented attributes.")
fit_attr = [k for k in est.__dict__.keys() if k.endswith('_')
and not k.startswith('_')]
fit_attr_names = [attr.name for attr in attributes]
undocumented_attrs = set(fit_attr).difference(fit_attr_names)
undocumented_attrs = set(undocumented_attrs).difference(skipped_attributes)
assert not undocumented_attrs,\
"Undocumented attributes: {}".format(undocumented_attrs)
| bsd-3-clause |
fredhusser/scikit-learn | sklearn/tree/export.py | 53 | 15772 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
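# For example, _color_brew(3) yields three (R, G, B) integer triples with hues
# spaced roughly 120 degrees apart; the exact values follow from the fixed
# saturation/value (s=0.75, v=0.9) used above.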
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _tree.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
crpurcell/RMpipeL5 | Imports/util_plotFITS.py | 1 | 10172 | #!/usr/bin/env python
#=============================================================================#
# #
# NAME: util_plotfits.py #
# #
# PURPOSE: Common function for plotting fits images. #
# #
# MODIFIED: 19-November-2015 by C. Purcell #
# #
# CONTENTS: #
# #
# label_format_dms #
# label_format_hms #
# label_format_deg #
# plot_fits_map #
# #
#=============================================================================#
import math as m
import numpy as np
import astropy.io.fits as pf
import astropy.wcs.wcs as pw
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FuncFormatter
from matplotlib.patches import Ellipse
from matplotlib.patches import Polygon
from normalize import APLpyNormalize
from util_FITS import strip_fits_dims
from util_FITS import mkWCSDict
from util_PPC import calc_stats
#-----------------------------------------------------------------------------#
def label_format_dms(deg, pos):
"""
Format decimal->DD:MM:SS. Called by the label formatter.
"""
angle = abs(deg)
sign=1
if angle!=0: sign = angle/deg
    # Calculate the degrees, minutes and seconds
dd = int(angle)
rmndr = 60.0*(angle - dd)
mm = int(rmndr)
ss = 60.0*(rmndr-mm)
# If rounding up to 60, carry to the next term
if float("%05.2f" % ss) >=60.0:
ss = 0.0
mm+=1.0
if float("%02d" % mm) >=60.0:
mm = 0.0
dd+=1.0
if sign > 0:
return "%02dd%02dm" % (sign*dd, mm)
else:
return "%03dd%02dm" % (sign*dd, mm)
#-----------------------------------------------------------------------------#
def label_format_hms(deg, pos):
"""
Format decimal->HH:MM:SS. Called by the label formatter.
"""
hrs = deg/15.0
angle = abs(hrs)
sign=1
if angle!=0: sign = angle/hrs
    # Calculate the hours, minutes and seconds
dd = int(angle)
rmndr = 60.0*(angle - dd)
mm = int(rmndr)
ss = 60.0*(rmndr-mm)
# If rounding up to 60, carry to the next term
if float("%05.2f" % ss) >=60.0:
ss = 0.0
mm+=1.0
if float("%02d" % mm) >=60.0:
mm = 0.0
dd+=1.0
if sign > 0:
return "%02dh%02dm%02.0fs" % (sign*dd, mm, ss)
else:
return "%03dh%02dm%02.0fs" % (sign*dd, mm, ss)
#-----------------------------------------------------------------------------#
def label_format_deg(deg, pos):
return "%.3f" % deg
#-----------------------------------------------------------------------------#
def plot_fits_map(data, header, stretch='auto', exponent=2, scaleFrac=0.9,
cmapName='gist_heat', zMin=None, zMax=None,
annEllipseLst=[], annPolyLst=[], bunit=None,
lw=1.0, interpolation='Nearest', fig=None, dpi=100,
doColbar=True):
"""
Plot a colourscale image of a FITS map.
annEllipseLst is a list of lists:
annEllipseLst[0][i] = x_deg
annEllipseLst[1][i] = y_deg
annEllipseLst[2][i] = minor_deg
annEllipseLst[3][i] = major_deg
annEllipseLst[4][i] = pa_deg
annEllipseLst[5][i] = colour ... optional, default to 'g'
annPolyLst is also a list of lists:
annPolyLst[0][i] = list of polygon coords = [[x1,y1], [x2, y2] ...]
annPolyLst[1][i] = colour of polygon e.g., 'w'
"""
# Strip unused dimensions from the array
data, header = strip_fits_dims(data, header, 2, 5)
# Parse the WCS information
w = mkWCSDict(header)
wcs = pw.WCS(w['header2D'])
# Calculate the image vmin and vmax by measuring the range in the inner
# 'scale_frac' of the image
s = data.shape
boxMaxX = int( s[-1]/2.0 + s[-1] * scaleFrac/2.0 + 1.0 )
boxMinX = int( s[-1]/2.0 - s[-1] * scaleFrac/2.0 + 1.0 )
boxMaxY = int( s[-2]/2.0 + s[-2] * scaleFrac/2.0 + 1.0 )
boxMinY = int( s[-2]/2.0 - s[-2] * scaleFrac/2.0 + 1.0 )
dataSample = data[boxMinY:boxMaxY, boxMinX:boxMaxX]
measures = calc_stats(dataSample)
sigma = abs(measures['max'] / measures['madfm'])
    if stretch == 'auto':
        if sigma <= 20:
            vMin = measures['madfm'] * (-1.5)
            vMax = measures['madfm'] * 10.0
            stretch = 'linear'
        elif sigma > 500:
            # Test the high-dynamic-range case before the generic >20 case,
            # otherwise this branch can never be reached.
            vMin = measures['madfm'] * (-7.0)
            vMax = measures['madfm'] * 200.0
            stretch = 'sqrt'
        else:
            vMin = measures['madfm'] * (-3.0)
            vMax = measures['madfm'] * 40.0
            stretch = 'linear'
    if zMax is not None:
        vMax = max(zMax, measures['max'])
    if zMin is not None:
        vMin = zMin
# Set the colourscale using an normalizer object
normalizer = APLpyNormalize(stretch=stretch, exponent=exponent,
vmin=vMin, vmax=vMax)
# Setup the figure
if fig is None:
fig = plt.figure(figsize=(9.5, 8))
ax = fig.add_axes([0.1, 0.08, 0.9, 0.87])
if w['coord_type']=='EQU':
ax.set_xlabel('Right Ascension')
ax.set_ylabel('Declination')
elif w['coord_type']=='GAL':
ax.set_xlabel('Galactic Longitude (deg)')
ax.set_ylabel('Galactic Latitude (deg)')
else:
ax.set_xlabel('Unknown')
ax.set_ylabel('Unknown')
cosY = m.cos( m.radians(w['ycent']) )
aspect = abs( w['ydelt'] / (w['xdelt'] * cosY))
# Set the format of the major tick mark and labels
if w['coord_type']=='EQU':
f = 15.0
majorFormatterX = FuncFormatter(label_format_hms)
minorFormatterX = None
majorFormatterY = FuncFormatter(label_format_dms)
        minorFormatterY = None
else:
f = 1.0
majorFormatterX = FuncFormatter(label_format_deg)
minorFormatterX = None
majorFormatterY = FuncFormatter(label_format_deg)
        minorFormatterY = None
ax.xaxis.set_major_formatter(majorFormatterX)
ax.yaxis.set_major_formatter(majorFormatterY)
# Set the location of the the major tick marks
#xrangeArcmin = abs(w['xmax']-w['xmin'])*(60.0*f)
#xmultiple = m.ceil(xrangeArcmin/3.0)/(60.0*f)
#yrangeArcmin = abs(w['ymax']-w['ymin'])*60.0
#ymultiple = m.ceil(yrangeArcmin/3.0)/60.0
#majorLocatorX = MultipleLocator(xmultiple)
#ax.xaxis.set_major_locator(majorLocatorX)
#majorLocatorY = MultipleLocator(ymultiple)
#ax.yaxis.set_major_locator(majorLocatorY)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
# Print the image to the axis
im = ax.imshow(data, interpolation=interpolation, origin='lower',
aspect=aspect,
extent=[w['xmax'], w['xmin'], w['ymin'], w['ymax']],
cmap=plt.get_cmap(cmapName), norm=normalizer)
# Add the colorbar
if doColbar:
cbar = fig.colorbar(im, pad=0.0)
if 'BUNIT' in header:
cbar.set_label(header['BUNIT'])
else:
cbar.set_label('Unknown')
if not bunit is None:
cbar.set_label(bunit)
# Format the colourbar labels - TODO
# Set white ticks
ax.tick_params(pad=5)
for line in ax.xaxis.get_ticklines() + ax.get_yticklines():
line.set_markeredgewidth(1)
line.set_color('w')
# Create the ellipse source annotations
if len(annEllipseLst) > 0:
if len(annEllipseLst) >= 5:
srcXLst = np.array(annEllipseLst[0])
srcYLst = np.array(annEllipseLst[1])
srcMinLst = np.array(annEllipseLst[2])
srcMajLst = np.array(annEllipseLst[3])
srcPALst = np.array(annEllipseLst[4])
if len(annEllipseLst) >= 6:
if type(annEllipseLst[5]) is str:
srcEColLst = [annEllipseLst[5]] * len(srcXLst)
elif type(annEllipseLst[5]) is list:
srcEColLst = annEllipseLst[5]
else:
                    srcEColLst = ['g'] * len(srcXLst)
else:
srcEColLst = ['g'] * len(srcXLst)
for i in range(len(srcXLst)):
try:
el = Ellipse((srcXLst[i], srcYLst[i]), srcMinLst[i],
srcMajLst[i], angle=180.0-srcPALst[i],
edgecolor=srcEColLst[i],
linewidth=lw, facecolor='none')
ax.add_artist(el)
except Exception:
pass
# Create the polygon source annotations
if len(annPolyLst) > 0:
annPolyCoordLst = annPolyLst[0]
if len(annPolyLst) > 1:
if type(annPolyLst[1]) is str:
annPolyColorLst = [annPolyLst[1]] * len(annPolyCoordLst)
elif type(annPolyLst[1]) is list:
annPolyColorLst = annPolyLst[1]
else:
annPolyColorLst = ['g'] * len(annPolyCoordLst)
else:
annPolyColorLst = ['g'] * len(annPolyCoordLst)
for i in range(len(annPolyCoordLst)):
cpoly = Polygon(annPolyCoordLst[i], animated=False, linewidth=lw)
cpoly.set_edgecolor(annPolyColorLst[i])
cpoly.set_facecolor('none')
ax.add_patch(cpoly)
return fig
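# Minimal usage sketch (the FITS file name and the single ellipse overlay are
# hypothetical):
#
#   data = pf.getdata("map.fits")
#   header = pf.getheader("map.fits")
#   fig = plot_fits_map(data, header,
#                       annEllipseLst=[[10.0], [-30.0], [0.01], [0.02],
#                                      [45.0], 'g'])
#   fig.savefig("map.png")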
| mit |
xesscorp/skidl | tests/spice_tests/pckg_test.py | 1 | 6441 | import matplotlib.pyplot as plt
from skidl.pyspice import *
sky_lib = SchLib(
"/home/devb/tmp/skywater-pdk/libraries/sky130_fd_pr/latest/models/sky130.lib.spice",
recurse=True,
lib_section="tt",
)
nfet_wl = Parameters(W=1.26, L=0.15)
pfet_wl = Parameters(W=1.26, L=0.15)
pfet = Part(sky_lib, "sky130_fd_pr__pfet_01v8", params=pfet_wl)
nfet = Part(sky_lib, "sky130_fd_pr__nfet_01v8", params=nfet_wl)
def oscope(waveforms, *nets, ymin=-0.4, ymax=2.4):
fig, axes = plt.subplots(nrows=len(nets), sharex=True, squeeze=False,
subplot_kw={'ylim':(ymin, ymax)}, gridspec_kw=None)
traces = axes[:,0]
num_traces = len(traces)
trace_hgt = 1.0 / num_traces
trace_lbl_position = dict(rotation=0,
horizontalalignment='right',
verticalalignment='center',
x=-0.01)
for i, (net, trace) in enumerate(zip(nets, traces), 1):
trace.set_ylabel(net.name, trace_lbl_position)
trace.set_position([0.1, (num_traces-i) * trace_hgt, 0.8, trace_hgt])
trace.plot(waveforms.time, waveforms[node(net)])
plt.show()
def counter(*bits, time_step=1.0@u_ns, vmin=0.0@u_V, vmax=1.8@u_V):
for bit in bits:
pulse = PULSEV(initial_value=vmax, pulsed_value=vmin,
pulse_width=time_step, period=2*time_step)
pulse["p, n"] += bit, gnd
time_step = 2 * time_step
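# Stimulus note: each successive bit net above is driven by a PULSEV source
# whose period doubles, so taken together the bits sweep through a binary
# counting pattern (the first net toggles every time_step, the next every
# 2*time_step, and so on).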
def pwr(dc_value=1.8@u_V):
vdd_ps = V(ref="Vdd_ps", dc_value=dc_value)
vdd_ps["p, n"] += Net("Vdd"), gnd
return vdd_ps["p"]
@package
def inverter(a=Net(), out=Net()):
qp = pfet()
qn = nfet()
gnd & qn.b
vdd & qp.b
vdd & qp["s,d"] & out & qn["d,s"] & gnd
a & qn.g & qp.g
@package
def nand(a=Net(), b=Net(), out=Net()):
q1, q2 = 2 * pfet()
q3, q4 = 2 * nfet()
vdd & q1.b & q2.b
gnd & q3.b & q4.b
vdd & (q1["s,d"] | q2["s,d"]) & out & q3["d,s"] & q4["d,s"] & gnd
a & q1.g & q3.g
b & q2.g & q4.g
@package
def xor(a=Net(), b=Net(), out=Net()):
a_inv, b_inv = inverter(), inverter()
a_inv.a += a
b_inv.a += b
an, abn, bn, bbn = 4 * nfet()
ap, abp, bp, bbp = 4 * pfet()
vdd & abp["s,d"] & bp["s,d"] & out & an["d,s"] & bn["d,s"] & gnd
vdd & ap["s,d"] & bbp["s,d"] & out & abn["d,s"] & bbn["d,s"] & gnd
a & ap.g & an.g
a_inv.out & abp.g & abn.g
b & bp.g & bn.g
b_inv.out & bbp.g & bbn.g
vdd & ap.b & abp.b & bp.b & bbp.b
gnd & an.b & abn.b & bn.b & bbn.b
@package
def full_adder(a=Net(), b=Net(), cin=Net(), s=Net(), cout=Net()):
ab_sum = Net()
xor()["a,b,out"] += a, b, ab_sum
xor()["a,b,out"] += ab_sum, cin, s
nand1, nand2, nand3 = nand(), nand(), nand()
nand1["a,b"] += ab_sum, cin
nand2["a,b"] += a, b
nand3["a,b,out"] += nand1.out, nand2.out, cout
@subcircuit
def adder(a, b, cin, s, cout):
width = len(s)
fadds = [full_adder() for _ in range(width)]
for i in range(width):
fadds[i]["a, b, s"] += a[i], b[i], s[i]
if i == 0:
fadds[i].cin += cin
else:
fadds[i].cin += fadds[i-1].cout
cout += fadds[-1].cout
def integerize(waveforms, *nets, threshold=0.9@u_V):
def binarize():
binary_vals = []
for net in nets:
binary_vals.append([v > threshold for v in waveforms[node(net)]])
return binary_vals
int_vals = []
for bin_vector in zip(*reversed(binarize())):
int_vals.append(int(bytes([ord('0')+b for b in bin_vector]), base=2))
return int_vals
def sample(sample_times, times, *int_vals):
sample_vals = [[] for _ in int_vals]
sample_times = list(reversed(sample_times))
sample_time = sample_times.pop()
for time, *int_vec in zip(times, *int_vals):
if sample_time < float(time):
for i, v in enumerate(int_vec):
sample_vals[i].append(v)
try:
sample_time = sample_times.pop()
except IndexError:
break
return sample_vals
@package
def weak_inverter(a=Net(), out=Net()):
weak_nfet_wl = Parameters(W=1.0, L=8.0)
weak_pfet_wl = Parameters(W=1.0, L=8.0)
qp = Part(sky_lib, "sky130_fd_pr__pfet_01v8", params=weak_pfet_wl)
qn = Part(sky_lib, "sky130_fd_pr__nfet_01v8", params=weak_nfet_wl)
gnd & qn.b
vdd & qp.b
vdd & qp["s,d"] & out & qn["d,s"] & gnd
a & qn.g & qp.g
@package
def sram_bit(wr=Net(), in_=Net(), out=Net()):
in_inv = inverter()
inv12, inv34 = weak_inverter(), weak_inverter()
m5, m6 = nfet(), nfet()
inv12["a, out"] += inv34["out, a"]
m5.s & out & inv12.out
m6.s & inv34.out
in_ & m5.d
in_ & in_inv["a, out"] & m6.d
wr & m5.g & m6.g
gnd & m5.b & m6.b
@package
def latch_bit(wr=Net(), in_=Net(), out=Net()):
inv_in, inv_out, inv_wr = inverter(), inverter(), inverter()
q_in, q_latch = nfet(), nfet()
in_ & q_in["s,d"] & inv_in["a, out"] & inv_out["a, out"] & out
inv_in.a & q_latch["s,d"] & out
q_in.g & wr
q_in.b & gnd
wr & inv_wr.a
q_latch.g & inv_wr.out
q_latch.b & gnd
@package
def reg_bit(wr=Net(), in_=Net(), out=Net()):
master, slave = latch_bit(), latch_bit()
wr_inv = inverter()
wr_inv.a += wr
in_ & master["in_, out"] & slave["in_, out"] & out
wr_inv.out & master.wr
wr & slave.wr
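# reg_bit chains two transparent latches into a master/slave flip-flop: the
# master latch (its wr driven through the inverter) is open while wr is low and
# the slave is open while wr is high, so the input is effectively captured on
# the rising edge of wr.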
@subcircuit
def register(wr, in_, out):
width = len(out)
reg_bits = [reg_bit() for _ in range(width)]
for i, rb in enumerate(reg_bits):
rb["wr, in_, out"] += wr, in_[i], out[i]
@subcircuit
def cntr(clk, out):
global gnd
width = len(out)
zero = Bus(width)
gnd += zero
nxt = Bus(width)
adder(out, zero, vdd, nxt, Net())
register(clk, nxt, out)
reset()
vdd = pwr()
clk = Net('clk')
# nw, old = Bus('NW', 3), Bus('OLD', 3)
cnt = Bus('CNT', 3)
counter(clk)
# inverter()["a, out"] += old[0], nw[0]
# reg_bit()["wr, in_, out"] += clk, nw, old
# register(clk, nw, old)
# adder(old, Bus(gnd,gnd,gnd), vdd, nw, Net())
# old & inverter()["a, out"] & nw
# register(clk, cnt, nxt)
# sum, cout = Bus('SUM', len(cnt)), Net()
# adder(cnt, Bus('B', gnd,gnd,gnd), vdd, sum, cout)
# cnt = Bus('CNT',2)
# nxt = Bus('NXT',2)
cntr(clk, cnt)
waveforms = generate_netlist().simulator().transient(step_time=0.01@u_ns, end_time=30@u_ns)
# oscope(waveforms, clk, *nw, *old)
oscope(waveforms, clk, *cnt) | mit |
sergiohgz/incubator-airflow | airflow/hooks/hive_hooks.py | 3 | 36631 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals
import contextlib
import os
import re
import subprocess
import time
from collections import OrderedDict
from tempfile import NamedTemporaryFile
import hmsclient
import six
import unicodecsv as csv
from past.builtins import basestring
from past.builtins import unicode
from six.moves import zip
import airflow.security.utils as utils
from airflow import configuration
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.file import TemporaryDirectory
from airflow.utils.helpers import as_flattened_list
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
def get_context_from_env_var():
"""
Extract context from env variable, e.g. dag_id, task_id and execution_date,
so that they can be used inside BashOperator and PythonOperator.
:return: The context of interest.
"""
return {format_map['default']: os.environ.get(format_map['env_var_format'], '')
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()}
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports the ``beeline``
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
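    A minimal usage sketch (illustrative only; it assumes a working
    ``hive_cli_default`` connection and a reachable Hive CLI):
    >>> hook = HiveCliHook()                              # doctest: +SKIP
    >>> out = hook.run_cli("SHOW TABLES;", schema="default")  # doctest: +SKIP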
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue or configuration.get('hive',
'default_hive_mapred_queue')
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
jdbc_url = '"{}"'.format(jdbc_url)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
hql = hql + '\n'
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue),
'-hiveconf',
'mapred.job.queue.name={}'
.format(self.mapred_queue),
'-hiveconf',
'tez.job.queue.name={}'
.format(self.mapred_queue)
])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
self.log.info(message)
                    error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
lst = int(error_loc.group(1))
begin = max(lst - 2, 0)
end = min(lst + 3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: string encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=(delimiter.encode(encoding)
if six.PY2 and isinstance(delimiter, unicode)
else delimiter),
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
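    # A minimal sketch of load_df; the table name and Hive types are invented
    # for illustration only:
    #
    #   import pandas as pd
    #   from collections import OrderedDict
    #   df = pd.DataFrame({'state': ['NY'], 'num': [10]})
    #   HiveCliHook().load_df(
    #       df, table='tmp.babynames',
    #       field_dict=OrderedDict([('state', 'STRING'), ('num', 'BIGINT')]))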
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
        large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
        :param field_dict: A dictionary of the field names in the file
            as keys and their Hive types as values.
            Note that it must be an OrderedDict so as to keep the columns' order.
:type field_dict: OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n"
hql += ";"
hql = hql.format(**locals())
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += '\n'
hql = hql.format(**locals())
self.log.info(hql)
self.run_cli(hql)
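    # Hypothetical load_file call; the path, table, partition and
    # tblproperties values are examples only:
    #
    #   HiveCliHook().load_file(
    #       '/tmp/data.csv', table='tmp.staging',
    #       field_dict=OrderedDict([('a', 'STRING'), ('b', 'INT')]),
    #       partition={'ds': '2015-01-01'},
    #       tblproperties={'comment': 'staging table'})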
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
# java short max val
MAX_PART_COUNT = 32767
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.conf.get('core', 'security') == 'kerberos' \
and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
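    # For a kerberized metastore, the connection ``extra`` is expected to look
    # roughly like the following JSON (values are illustrative assumptions):
    #
    #   {"authMechanism": "GSSAPI", "kerberos_service_name": "hive"}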
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
        :type table: string
        :param partition: Expression that matches the partitions to check for
            (eg `a = 'b' AND c = 'd'`)
        :type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(
schema, table, partition, 1)
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
        :type table: string
        :param partition_name: Name of the partitions to check for (eg `a=b/c=d`)
        :type partition_name: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
with self.metastore as client:
return client.get_table(dbname=db, tbl_name=table_name)
def get_tables(self, db, pattern='*'):
"""
        Get metastore table objects for tables matching the pattern
"""
with self.metastore as client:
tables = client.get_tables(db_name=db, pattern=pattern)
return client.get_table_objects_by_name(db, tables)
def get_databases(self, pattern='*'):
"""
        Get a list of databases matching the pattern
"""
with self.metastore as client:
return client.get_databases(pattern)
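    # Sketch of the two lookups above (database name and patterns are assumptions):
    #
    #   hook = HiveMetastoreHook()
    #   tables = hook.get_tables(db='airflow', pattern='static_*')
    #   dbs = hook.get_databases(pattern='air*')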
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
        for tables with fewer than 32767 partitions (the Java short max value).
        For subpartitioned tables, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = client.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
else:
parts = client.get_partitions(
db_name=schema, tbl_name=table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
@staticmethod
def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map):
"""
        Helper method to get the max partition value for partition_key
        from a list of part specs. The key:value pairs in filter_map will be
        used to filter out partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: string
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException("Provided partition_key {} "
"is not in part_specs.".format(partition_key))
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException("Keys in provided filter_map {} "
"are not subset of part_spec keys: {}"
.format(', '.join(filter_map.keys()),
', '.join(part_specs[0].keys())))
candidates = [p_dict[partition_key] for p_dict in part_specs
if filter_map is None or
all(item in p_dict.items() for item in filter_map.items())]
if not candidates:
return None
else:
return max(candidates).encode('utf-8')
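    # Worked example for the helper above, with made-up part specs; note the
    # utf-8 encoded return value:
    #
    #   part_specs = [{'ds': '2015-01-01', 'hr': '00'},
    #                 {'ds': '2015-01-02', 'hr': '00'}]
    #   HiveMetastoreHook._get_max_partition_from_part_specs(
    #       part_specs, 'ds', {'hr': '00'})  # -> b'2015-01-02'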
def max_partition(self, schema, table_name, field=None, filter_map=None):
"""
Returns the maximum value for all partitions with given field in a table.
        If only one partition key exists in the table, the key will be used as field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:type schema: string
:param table_name: table name.
:type table_name: string
:param field: partition key to get max partition from.
:type field: string
:param filter_map: partition_key:partition_value map used for partition filtering.
:type filter_map: map
>>> hh = HiveMetastoreHook()
        >>> filter_map = {'ds': '2015-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
key_name_set = set(key.name for key in table.partitionKeys)
if len(table.partitionKeys) == 1:
field = table.partitionKeys[0].name
elif not field:
raise AirflowException("Please specify the field you want the max "
"value for.")
elif field not in key_name_set:
raise AirflowException("Provided field is not a partition key.")
if filter_map and not set(filter_map.keys()).issubset(key_name_set):
raise AirflowException("Provided filter_map contains keys "
"that are not partition key.")
part_names = \
client.get_partition_names(schema,
table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
part_specs = [client.partition_name_to_spec(part_name)
for part_name in part_names]
return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs,
field,
filter_map)
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the pyhive library
    Note that the default authMechanism is PLAIN; to override it you
    can specify it in the ``extra`` of your connection in the UI,
    e.g. ``{"authMechanism": "KERBEROS"}``.
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
if auth_mechanism == 'NONE' and db.login is None:
# we need to give a username
username = 'airflow'
kerberos_service_name = None
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
        # pyhive uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'GSSAPI':
self.log.warning(
"Detected deprecated 'GSSAPI' for authMechanism "
"for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'KERBEROS'
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
database=schema or db.schema or 'default')
def _get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
from pyhive.exc import ProgrammingError
if isinstance(hql, basestring):
hql = [hql]
previous_description = None
with contextlib.closing(self.get_conn(schema)) as conn, \
contextlib.closing(conn.cursor()) as cur:
cur.arraysize = fetch_size or 1000
env_context = get_context_from_env_var()
if hive_conf:
env_context.update(hive_conf)
for k, v in env_context.items():
cur.execute("set {}={}".format(k, v))
for statement in hql:
cur.execute(statement)
                # we only get results of statements that return records
lowered_statement = statement.lower().strip()
if (lowered_statement.startswith('select') or
lowered_statement.startswith('with') or
(lowered_statement.startswith('set') and
'=' not in lowered_statement)):
description = [c for c in cur.description]
if previous_description and previous_description != description:
message = '''The statements are producing different descriptions:
Current: {}
Previous: {}'''.format(repr(description),
repr(previous_description))
raise ValueError(message)
elif not previous_description:
previous_description = description
yield description
try:
# DB API 2 raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
for row in cur:
yield row
except ProgrammingError:
self.log.debug("get_results returned no records")
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:param schema: target schema, default to 'default'.
        :param fetch_size: max size of result to fetch.
        :param hive_conf: hive_conf to execute along with the hql.
:return: results of hql execution.
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {
'data': list(results_iter),
'header': header
}
return results
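    # Sketch of the returned structure; the connection id and query are
    # illustrative, and ``header`` mirrors the DB API cursor description:
    #
    #   hook = HiveServer2Hook(hiveserver2_conn_id='hiveserver2_default')
    #   res = hook.get_results("SELECT 1 AS one")
    #   res['header']  # [(column name, type code, ...), ...]
    #   res['data']    # [(1,)]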
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000,
hive_conf=None):
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:param csv_filepath: filepath of csv to write results into.
        :param schema: target schema, default to 'default'.
:param delimiter: delimiter of the csv file.
:param lineterminator: lineterminator of the csv file.
        :param output_header: whether to write the column names as a header row.
        :param fetch_size: number of result rows fetched and written at a time.
        :param hive_conf: hive_conf to execute along with the hql.
:return:
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 |
shangwuhencc/scikit-learn | setup.py | 76 | 9370 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
        # They must succeed without NumPy, for example when pip is used
        # to install scikit-learn while NumPy is not yet present on
        # the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/api/sankey_demo_old.py | 6 | 7147 | #!/usr/bin/env python
from __future__ import print_function
__author__ = "Yannick Copin <[email protected]>"
__version__ = "Time-stamp: <10/02/2010 16:49 [email protected]>"
import numpy as np
def sankey(ax,
outputs=[100.], outlabels=None,
inputs=[100.], inlabels='',
dx=40, dy=10, outangle=45, w=3, inangle=30, offset=2, **kwargs):
"""Draw a Sankey diagram.
outputs: array of outputs, should sum up to 100%
outlabels: output labels (same length as outputs),
or None (use default labels) or '' (no labels)
inputs and inlabels: similar for inputs
dx: horizontal elongation
dy: vertical elongation
outangle: output arrow angle [deg]
w: output arrow shoulder
inangle: input dip angle
offset: text offset
**kwargs: propagated to Patch (e.g., fill=False)
Return (patch,[intexts,outtexts]).
"""
import matplotlib.patches as mpatches
from matplotlib.path import Path
outs = np.absolute(outputs)
outsigns = np.sign(outputs)
outsigns[-1] = 0 # Last output
ins = np.absolute(inputs)
insigns = np.sign(inputs)
insigns[0] = 0 # First input
assert sum(outs) == 100, "Outputs don't sum up to 100%"
assert sum(ins) == 100, "Inputs don't sum up to 100%"
def add_output(path, loss, sign=1):
h = (loss/2 + w)*np.tan(outangle/180. * np.pi) # Arrow tip height
move, (x, y) = path[-1] # Use last point as reference
if sign == 0: # Final loss (horizontal)
path.extend([(Path.LINETO, [x+dx, y]),
(Path.LINETO, [x+dx, y+w]),
(Path.LINETO, [x+dx+h, y-loss/2]), # Tip
(Path.LINETO, [x+dx, y-loss-w]),
(Path.LINETO, [x+dx, y-loss])])
outtips.append((sign, path[-3][1]))
else: # Intermediate loss (vertical)
path.extend([(Path.CURVE4, [x+dx/2, y]),
(Path.CURVE4, [x+dx, y]),
(Path.CURVE4, [x+dx, y+sign*dy]),
(Path.LINETO, [x+dx-w, y+sign*dy]),
(Path.LINETO, [x+dx+loss/2, y+sign*(dy+h)]), # Tip
(Path.LINETO, [x+dx+loss+w, y+sign*dy]),
(Path.LINETO, [x+dx+loss, y+sign*dy]),
(Path.CURVE3, [x+dx+loss, y-sign*loss]),
(Path.CURVE3, [x+dx/2+loss, y-sign*loss])])
outtips.append((sign, path[-5][1]))
def add_input(path, gain, sign=1):
h = (gain/2)*np.tan(inangle/180. * np.pi) # Dip depth
move, (x, y) = path[-1] # Use last point as reference
if sign == 0: # First gain (horizontal)
path.extend([(Path.LINETO, [x-dx, y]),
(Path.LINETO, [x-dx+h, y+gain/2]), # Dip
(Path.LINETO, [x-dx, y+gain])])
xd, yd = path[-2][1] # Dip position
indips.append((sign, [xd-h, yd]))
else: # Intermediate gain (vertical)
path.extend([(Path.CURVE4, [x-dx/2, y]),
(Path.CURVE4, [x-dx, y]),
(Path.CURVE4, [x-dx, y+sign*dy]),
(Path.LINETO, [x-dx-gain/2, y+sign*(dy-h)]), # Dip
(Path.LINETO, [x-dx-gain, y+sign*dy]),
(Path.CURVE3, [x-dx-gain, y-sign*gain]),
(Path.CURVE3, [x-dx/2-gain, y-sign*gain])])
xd, yd = path[-4][1] # Dip position
indips.append((sign, [xd, yd+sign*h]))
outtips = [] # Output arrow tip dir. and positions
urpath = [(Path.MOVETO, [0, 100])] # 1st point of upper right path
lrpath = [(Path.LINETO, [0, 0])] # 1st point of lower right path
for loss, sign in zip(outs, outsigns):
add_output(sign>=0 and urpath or lrpath, loss, sign=sign)
indips = [] # Input arrow tip dir. and positions
llpath = [(Path.LINETO, [0, 0])] # 1st point of lower left path
ulpath = [(Path.MOVETO, [0, 100])] # 1st point of upper left path
for gain, sign in reversed(list(zip(ins, insigns))):
add_input(sign<=0 and llpath or ulpath, gain, sign=sign)
def revert(path):
"""A path is not just revertable by path[::-1] because of Bezier
curves."""
rpath = []
nextmove = Path.LINETO
for move, pos in path[::-1]:
rpath.append((nextmove, pos))
nextmove = move
return rpath
    # Concatenate subpaths in the correct order
path = urpath + revert(lrpath) + llpath + revert(ulpath)
codes, verts = zip(*path)
verts = np.array(verts)
# Path patch
path = Path(verts, codes)
patch = mpatches.PathPatch(path, **kwargs)
ax.add_patch(patch)
if False: # DEBUG
print("urpath", urpath)
print("lrpath", revert(lrpath))
print("llpath", llpath)
print("ulpath", revert(ulpath))
xs, ys = zip(*verts)
ax.plot(xs, ys, 'go-')
# Labels
def set_labels(labels, values):
"""Set or check labels according to values."""
if labels == '': # No labels
return labels
elif labels is None: # Default labels
return ['%2d%%' % val for val in values]
else:
assert len(labels) == len(values)
return labels
def put_labels(labels, positions, output=True):
"""Put labels to positions."""
texts = []
lbls = output and labels or labels[::-1]
for i, label in enumerate(lbls):
s, (x, y) = positions[i] # Label direction and position
if s == 0:
t = ax.text(x+offset, y, label,
ha=output and 'left' or 'right', va='center')
elif s > 0:
t = ax.text(x, y+offset, label, ha='center', va='bottom')
else:
t = ax.text(x, y-offset, label, ha='center', va='top')
texts.append(t)
return texts
outlabels = set_labels(outlabels, outs)
outtexts = put_labels(outlabels, outtips, output=True)
inlabels = set_labels(inlabels, ins)
intexts = put_labels(inlabels, indips, output=False)
# Axes management
ax.set_xlim(verts[:, 0].min()-dx, verts[:, 0].max()+dx)
ax.set_ylim(verts[:, 1].min()-dy, verts[:, 1].max()+dy)
ax.set_aspect('equal', adjustable='datalim')
return patch, [intexts, outtexts]
if __name__=='__main__':
import matplotlib.pyplot as plt
outputs = [10., -20., 5., 15., -10., 40.]
outlabels = ['First', 'Second', 'Third', 'Fourth', 'Fifth', 'Hurray!']
outlabels = [s+'\n%d%%' % abs(l) for l, s in zip(outputs, outlabels)]
inputs = [60., -25., 15.]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[], title="Sankey diagram")
patch, (intexts, outtexts) = sankey(ax, outputs=outputs,
outlabels=outlabels, inputs=inputs,
inlabels=None, fc='g', alpha=0.2)
outtexts[1].set_color('r')
outtexts[-1].set_fontweight('bold')
plt.show()
| mit |
percyfal/snakemakelib-core | snakemakelib/graphics/geom.py | 1 | 3798 | '''
Author: Per Unneberg
Created: Tue Dec 1 08:56:58 2015
'''
import pandas.core.common as com
from bokeh.models import ColumnDataSource
from . import utils
from .color import colorbrewer
from snakemakelib.log import LoggerManager
from .axes import xaxis, yaxis
smllogger = LoggerManager().getLogger(__name__)
__all__ = ['dotplot', 'lines', 'points']
def dotplot(x, y, df, return_source=False, marker='circle',
**kwargs):
# setup figure
fig = utils.create_bokeh_fig_set_props(plot_height=kwargs.pop('plot_height', None),
plot_width=kwargs.pop('plot_width', None),
**kwargs)
xaxis(fig, **kwargs)
yaxis(fig, **kwargs)
color = kwargs.get('color', None)
source = utils.df_to_source(df)
    if com.is_numeric_dtype(source.to_df()[x]):
        raise TypeError("{}: dependent variable must not be of a numerical type".format(__name__))
if isinstance(y, list):
color = [None] * len(y)
if 'color' in kwargs:
if isinstance(kwargs['color'], list) and len(kwargs['color']) == len(y):
color = kwargs['color']
else:
color = [kwargs['color']] * len(y)
for yy, c in zip(y, color):
if not c is None:
kwargs['color'] = c
fig = utils.add_glyph(fig, x, yy, source, marker, **kwargs)
else:
fig = utils.add_glyph(fig, x, y, source, marker, **kwargs)
return fig
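# Assumed usage sketch for dotplot (column names are invented); the x column
# must be categorical and y may be a single column or a list of columns:
#
#   import pandas as pd
#   df = pd.DataFrame({'sample': ['a', 'b'], 'cov': [10, 20], 'dup': [1, 2]})
#   fig = dotplot('sample', ['cov', 'dup'], df, color=['red', 'blue'])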
def points(x, y, df, return_source=False, marker='circle',
**kwargs):
"""Add points to a figure.
Args:
"""
# setup figure
fig = utils.create_bokeh_fig_set_props(plot_height=kwargs.pop('plot_height', None),
plot_width=kwargs.pop('plot_width', None),
**kwargs)
xaxis(fig, **kwargs)
yaxis(fig, **kwargs)
source = utils.df_to_source(df)
fig = utils.add_glyph(fig, x, y, source, marker, **kwargs)
return fig
def lines(fig, x, y, df, groups=None, **kwargs):
"""lines: add lines to a figure
Args:
fig (:py:class:`~bokeh.plotting.Plot`): bokeh Plot object
x (str): string for x component
y (str): string for y component
      df (:py:class:`~pandas.DataFrame`): pandas DataFrame
source (:py:class:`~bokeh.models.ColumnDataSource`): bokeh ColumnDataSource object
groups (str, list(str)): string or list of strings for columns to group by
kwargs: keyword arguments to pass to fig.line
Example:
.. bokeh-plot::
:source-position: above
import pandas as pd
from bokeh.plotting import figure, show, hplot
from snakemakelib.graphics import lines
df = pd.DataFrame([[1,2], [2,5], [3,9]], columns=["x", "y"])
f = figure(title="Line plot", plot_width=400, plot_height=400)
lines(f, "x", "y", df, legend="y")
lines(f, "x", "x", df, legend="x", color="red")
show(f)
"""
smllogger.debug("Adding points to figure {}".format(fig))
if groups is None:
source = ColumnDataSource(df)
fig.line(x=x, y=y, source=source, **kwargs)
else:
        grouped = df.groupby(groups)
colors = colorbrewer(datalen=len(grouped.groups.keys()))
for k, color in zip(grouped.groups.keys(), colors):
name = k
group = grouped.get_group(name)
source = ColumnDataSource(group)
if 'legend' in kwargs:
kwargs['legend'] = str(name)
if 'color' in kwargs:
kwargs['color'] = color
fig.line(x=x, y=y, source=source, **kwargs)
return fig
| mit |
kylerbrown/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; so does the average rooms
per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs/F4_zOR_state.py | 1 | 4479 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 7/27/14
###Function: Draw mean retro zOR vs. state with all seasons together
###Import data: R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
#### These data were cleaned with data_extraction/clean_OR_hhsreg_week_outpatient.R and exported with OR_zip3_week.sql
#### allpopstat_zip3_season_cl.csv includes child, adult, and other populations; popstat_zip3_season_cl.csv includes only child and adult populations
###Command Line: python F4_zOR_region.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
from itertools import product
from collections import defaultdict
import operator
import matplotlib.cm as cm
## local modules ##
import functions as fxn
### data structures ###
# d_st_distr[state abbr] = [mean retro zOR S1, mean retrozOR S2, ...]
d_st_distr = defaultdict(list)
# d_st_distr_mask[state abbr] = [mean retro zOR S1, mean retrozOR S2, ...], where NaNs are masked
d_st_distr_mask = defaultdict(list)
# d_st_median[state abbr] = median of mean retro zOR across all seasons for a given state, where NaN is removed from median calculation
d_st_median = {}
# d_reg_col[region number] = str('color')
d_reg_col = {}
### called/local plotting parameters ###
ps = fxn.pseasons
fs = 24
fssml = 12
### functions ###
def grabStateToRegion(state_reg_file):
dict_state_region = {}
for line in state_reg_file:
state = str(line[5])
region = int(line[8])
dict_state_region[state] = region
return dict_state_region
### data files ###
# state zOR data
st_zORin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications.csv', 'r')
st_zORin.readline()
st_zOR = csv.reader(st_zORin, delimiter=',')
# state to region number conversion
st_regin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
st_regin.readline()
st_reg = csv.reader(st_regin, delimiter=',')
### program ###
# nation-level peak-based retrospective classification
## read state zOR data ##
# d_st_classif[(season, state abbr)] = (mean retro zOR, mean early zOR)
d_st_classif = fxn.readStateClassifFile(st_zOR)
# grab list of unique states in dataset
states = list(set([key[1] for key in d_st_classif]))
## read state to region data ##
# d_st_reg[state abbr] = region number
d_st_reg = grabStateToRegion(st_reg)
## region-color dictionary ##
# d_reg_col[region number] = str('color')
d_reg_col = dict(zip(range(1,11), cm.rainbow(np.linspace(0, 1, len(range(1,11))))))
# d_st_distr[state abbr] = [mean retro zOR S1, mean retrozOR S2, ...]
# d_st_median[state abbr] = median of mean retro zOR across seasons (masked NaNs)
for st in states:
d_st_distr[st] = [d_st_classif[key][0] for key in d_st_classif if key[1] == st]
dummymask = np.ma.array(d_st_distr[st], mask = np.isnan(d_st_distr[st]))
d_st_distr_mask[st] = dummymask
dummymedian = np.ma.median(dummymask)
d_st_median[st] = dummymedian
# remove states with only masked medians from the dictionary
d_st_median_sub = dict((k, d_st_median[k]) for k in d_st_median if d_st_median[k])
# sort states by median of mean retro zOR across all seasons
sort_median_dict = sorted(d_st_median_sub.iteritems(), key=operator.itemgetter(1))
# grab list of sorted states for plotting
sorted_states = [item[0] for item in sort_median_dict]
# grab list of colors in order of sorted states -- each region is its own color
sorted_colors = [d_reg_col[d_st_reg[st]] for st in sorted_states]
# grab only unmasked values for boxplot
retrozOR_by_state = [[val for val in d_st_distr_mask[state].T if val] for state in sorted_states]
## draw figure ##
bxp = plt.boxplot(retrozOR_by_state, patch_artist=True)
for patch, color in zip(bxp['boxes'], sorted_colors):
patch.set_facecolor(color)
plt.ylabel('Mean Retrospective zOR', fontsize=fs)
plt.xlim([0.5, 47.5])
plt.ylim([-4, 10])
plt.xticks(xrange(1, len(sorted_states)+1), sorted_states, rotation = 'vertical', fontsize=fssml)
plt.yticks(fontsize=fssml)
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/F4/zOR_state_national.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
| mit |
TomAugspurger/pandas | pandas/tests/io/parser/test_python_parser_only.py | 2 | 9485 | """
Tests that apply specifically to the Python parser. Unless specifically
stated as a Python-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the C parser can accept further
arguments when parsing.
"""
import csv
from io import BytesIO, StringIO
import pytest
from pandas.errors import ParserError
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
def test_default_separator(python_parser_only):
# see gh-17333
#
# csv.Sniffer in Python treats "o" as separator.
data = "aob\n1o2\n3o4"
parser = python_parser_only
expected = DataFrame({"a": [1, 3], "b": [2, 4]})
result = parser.read_csv(StringIO(data), sep=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("skipfooter", ["foo", 1.5, True])
def test_invalid_skipfooter_non_int(python_parser_only, skipfooter):
# see gh-15925 (comment)
data = "a\n1\n2"
parser = python_parser_only
msg = "skipfooter must be an integer"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=skipfooter)
def test_invalid_skipfooter_negative(python_parser_only):
# see gh-15925 (comment)
data = "a\n1\n2"
parser = python_parser_only
msg = "skipfooter cannot be negative"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=-1)
@pytest.mark.parametrize("kwargs", [dict(sep=None), dict(delimiter="|")])
def test_sniff_delimiter(python_parser_only, kwargs):
data = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
parser = python_parser_only
result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["A", "B", "C"],
index=Index(["foo", "bar", "baz"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_sniff_delimiter_comment(python_parser_only):
data = """# comment line
index|A|B|C
# comment line
foo|1|2|3 # ignore | this
bar|4|5|6
baz|7|8|9
"""
parser = python_parser_only
result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment="#")
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["A", "B", "C"],
index=Index(["foo", "bar", "baz"], name="index"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_sniff_delimiter_encoding(python_parser_only, encoding):
parser = python_parser_only
data = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
if encoding is not None:
from io import TextIOWrapper
data = data.encode(encoding)
data = BytesIO(data)
data = TextIOWrapper(data, encoding=encoding)
else:
data = StringIO(data)
result = parser.read_csv(data, index_col=0, sep=None, skiprows=2, encoding=encoding)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["A", "B", "C"],
index=Index(["foo", "bar", "baz"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_single_line(python_parser_only):
# see gh-6607: sniff separator
parser = python_parser_only
result = parser.read_csv(StringIO("1,2"), names=["a", "b"], header=None, sep=None)
expected = DataFrame({"a": [1], "b": [2]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(skipfooter=2), dict(nrows=3)])
def test_skipfooter(python_parser_only, kwargs):
# see gh-6607
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
parser = python_parser_only
result = parser.read_csv(StringIO(data), **kwargs)
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")]
)
def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
# see gh-6607
parser = python_parser_only
with open(csv1, "rb") as f:
data = f.read()
data = data.replace(b",", b"::")
expected = parser.read_csv(csv1)
module = pytest.importorskip(compression)
klass = getattr(module, klass)
with tm.ensure_clean() as path:
tmp = klass(path, mode="wb")
tmp.write(data)
tmp.close()
result = parser.read_csv(path, sep="::", compression=compression)
tm.assert_frame_equal(result, expected)
def test_read_csv_buglet_4x_multi_index(python_parser_only):
# see gh-6607
data = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
parser = python_parser_only
expected = DataFrame(
[
[-0.5109, -2.3358, -0.4645, 0.05076, 0.3640],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
[-0.6662, -0.5243, -0.3580, 0.89145, 2.5838],
],
columns=["A", "B", "C", "D", "E"],
index=MultiIndex.from_tuples(
[("a", "b", 10.0032, 5), ("a", "q", 20, 4), ("x", "q", 30, 3)],
names=["one", "two", "three", "four"],
),
)
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_read_csv_buglet_4x_multi_index2(python_parser_only):
# see gh-6893
data = " A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9"
parser = python_parser_only
expected = DataFrame.from_records(
[(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list("abcABC"),
index=list("abc"),
)
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("add_footer", [True, False])
def test_skipfooter_with_decimal(python_parser_only, add_footer):
# see gh-6971
data = "1#2\n3#4"
parser = python_parser_only
expected = DataFrame({"a": [1.2, 3.4]})
if add_footer:
# The stray footer line should not mess with the
# casting of the first two lines if we skip it.
kwargs = dict(skipfooter=1)
data += "\nFooter"
else:
kwargs = dict()
result = parser.read_csv(StringIO(data), names=["a"], decimal="#", **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sep", ["::", "#####", "!!!", "123", "#1!c5", "%!c!d", "@@#4:2", "_!pd#_"]
)
@pytest.mark.parametrize(
"encoding", ["utf-16", "utf-16-be", "utf-16-le", "utf-32", "cp037"]
)
def test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding):
# see gh-3404
expected = DataFrame({"a": [1], "b": [2]})
parser = python_parser_only
data = "1" + sep + "2"
encoded_data = data.encode(encoding)
result = parser.read_csv(
BytesIO(encoded_data), sep=sep, names=["a", "b"], encoding=encoding
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])
def test_multi_char_sep_quotes(python_parser_only, quoting):
# see gh-13374
kwargs = dict(sep=",,")
parser = python_parser_only
data = 'a,,b\n1,,a\n2,,"2,,b"'
msg = "ignored when a multi-char delimiter is used"
def fail_read():
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), quoting=quoting, **kwargs)
if quoting == csv.QUOTE_NONE:
# We expect no match, so there should be an assertion
# error out of the inner context manager.
with pytest.raises(AssertionError):
fail_read()
else:
fail_read()
def test_none_delimiter(python_parser_only, capsys):
# see gh-13374 and gh-17465
parser = python_parser_only
data = "a,b,c\n0,1,2\n3,4,5,6\n7,8,9"
expected = DataFrame({"a": [0, 7], "b": [1, 8], "c": [2, 9]})
# We expect the third line in the data to be
# skipped because it is malformed, but we do
# not expect any errors to occur.
result = parser.read_csv(
StringIO(data), header=0, sep=None, warn_bad_lines=True, error_bad_lines=False
)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert "Skipping line 3" in captured.err
@pytest.mark.parametrize("data", ['a\n1\n"b"a', 'a,b,c\ncat,foo,bar\ndog,foo,"baz'])
@pytest.mark.parametrize("skipfooter", [0, 1])
def test_skipfooter_bad_row(python_parser_only, data, skipfooter):
# see gh-13879 and gh-15910
msg = "parsing errors in the skipped footer rows"
parser = python_parser_only
def fail_read():
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), skipfooter=skipfooter)
if skipfooter:
fail_read()
else:
# We expect no match, so there should be an assertion
# error out of the inner context manager.
with pytest.raises(AssertionError):
fail_read()
def test_malformed_skipfooter(python_parser_only):
parser = python_parser_only
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#", skipfooter=1)
| bsd-3-clause |
gosox5555/data | us-weather-history/visualize_weather.py | 36 | 4799 | import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime
'''
This is an example to generate the Philadelphia, PA weather chart.
If you want to make the chart for another city, you will have to modify
this code slightly to read that city's data in, change the title, and
likely change the y-axis of the chart to fit your city's temperature range.
I also use a custom matplotlib style as the basis for these charts, which you
can find here: https://gist.githubusercontent.com/rhiever/d0a7332fe0beebfdc3d5/raw/223d70799b48131d5ce2723cd5784f39d7a3a653/tableau10.mplstyle
'''
weather_data = pd.read_csv('KPHL.csv', parse_dates=['date'])
print(weather_data.describe())
# Generate a bunch of histograms of the data to make sure that all of the data
# is in an expected range.
with plt.style.context('https://gist.githubusercontent.com/rhiever/d0a7332fe0beebfdc3d5/raw/223d70799b48131d5ce2723cd5784f39d7a3a653/tableau10.mplstyle'):
for column in weather_data.columns:
if column in ['date']:
continue
plt.figure()
plt.hist(weather_data[column].values)
plt.title(column)
plt.savefig('{}.png'.format(column))
# Make sure we're only plotting temperatures for July 2014 - June 2015
weather_data_subset = weather_data[weather_data['date'] >= datetime(year=2014, month=7, day=1)]
weather_data_subset = weather_data_subset[weather_data_subset['date'] < datetime(year=2015, month=7, day=1)].copy()
weather_data_subset['day_order'] = range(len(weather_data_subset))
day_order = weather_data_subset['day_order']
record_max_temps = weather_data_subset['record_max_temp'].values
record_min_temps = weather_data_subset['record_min_temp'].values
average_max_temps = weather_data_subset['average_max_temp'].values
average_min_temps = weather_data_subset['average_min_temp'].values
actual_max_temps = weather_data_subset['actual_max_temp'].values
actual_min_temps = weather_data_subset['actual_min_temp'].values
fig, ax1 = plt.subplots(figsize=(15, 7))
# Create the bars showing all-time record highs and lows
plt.bar(day_order, record_max_temps - record_min_temps, bottom=record_min_temps,
edgecolor='none', color='#C3BBA4', width=1)
# Create the bars showing average highs and lows
plt.bar(day_order, average_max_temps - average_min_temps, bottom=average_min_temps,
edgecolor='none', color='#9A9180', width=1)
# Create the bars showing this year's highs and lows
plt.bar(day_order, actual_max_temps - actual_min_temps, bottom=actual_min_temps,
edgecolor='black', linewidth=0.5, color='#5A3B49', width=1)
new_max_records = weather_data_subset[weather_data_subset.record_max_temp <= weather_data_subset.actual_max_temp]
new_min_records = weather_data_subset[weather_data_subset.record_min_temp >= weather_data_subset.actual_min_temp]
# Create the dots marking record highs and lows for the year
plt.scatter(new_max_records['day_order'].values + 0.5,
new_max_records['actual_max_temp'].values + 1.25,
s=15, zorder=10, color='#d62728', alpha=0.75, linewidth=0)
plt.scatter(new_min_records['day_order'].values + 0.5,
new_min_records['actual_min_temp'].values - 1.25,
s=15, zorder=10, color='#1f77b4', alpha=0.75, linewidth=0)
plt.ylim(-15, 111)
plt.xlim(-5, 370)
plt.yticks(range(-10, 111, 10), [r'{}$^\circ$'.format(x)
for x in range(-10, 111, 10)], fontsize=10)
plt.ylabel(r'Temperature ($^\circ$F)', fontsize=12)
month_beginning_df = weather_data_subset[weather_data_subset['date'].apply(lambda x: True if x.day == 1 else False)]
month_beginning_indeces = list(month_beginning_df['day_order'].values)
month_beginning_names = list(month_beginning_df['date'].apply(lambda x: x.strftime("%B")).values)
month_beginning_names[0] += '\n\'14'
month_beginning_names[6] += '\n\'15'
# Add the last month label manually
month_beginning_indeces += [weather_data_subset['day_order'].values[-1]]
month_beginning_names += ['July']
plt.xticks(month_beginning_indeces,
month_beginning_names,
fontsize=10)
ax2 = ax1.twiny()
plt.xticks(month_beginning_indeces,
month_beginning_names,
fontsize=10)
plt.xlim(-5, 370)
plt.grid(False)
ax3 = ax1.twinx()
plt.yticks(range(-10, 111, 10), [r'{}$^\circ$'.format(x)
for x in range(-10, 111, 10)], fontsize=10)
plt.ylim(-15, 111)
plt.grid(False)
plt.title('Philadelphia, PA\'s weather, July 2014 - June 2015\n\n', fontsize=20)
plt.savefig('philadelphia-weather-july14-june15.png')
| mit |
Srisai85/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
darioml/python-aer | setup.py | 1 | 1103 | #! /usr/bin/env python
"""
Author: Dario ML
Program: SETUP.PY
Date: Saturday, June 06, 2014
Description: Setup and install TD algorithms.
"""
from distutils.core import setup
setup(name='python-aer',
version='0.1.2',
author="Dario Magliocchetti",
author_email="[email protected]",
url="https://github.com/darioml/pAER-python-aer-lib",
description='Python Address Event Representation (AER) Library',
long_description='This package provides tools required to visualise, manipulate and use address event representational data (.aedat format). ',
package_dir={"paer" : "src"},
packages=["paer"],
license="GPL 2.0",
classifiers=[
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Environment :: Console",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha"
],
install_requires=[
'numpy',
'scipy',
'matplotlib'
]
)
| gpl-2.0 |
lmjohns3/cube-experiment | plots/plot-target-approach.py | 1 | 3356 | #!/usr/bin/env python
import climate
import itertools
import lmj.cubes
import lmj.plot
import numpy as np
import pandas as pd
FRAME_RATE = 50
MARKERS = 'r-fing-index l-fing-index r-heel r-head-front'
_COLORS = '#111111 #d62728 #1f77b4 #2ca02c'
COLORS = dict(zip(MARKERS.split(), _COLORS.split()))
@climate.annotate(
root='read subject data from this file tree',
pattern=('plot data from files matching this pattern', 'option'),
output=('save movie in this output filename', 'option'),
animate=('if given, create a rotating 3d animation', 'option'),
target=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*/*trial00*', output=None, animate=None, target=3, approach_sec=1):
targets = None
num_frames = int(FRAME_RATE * approach_sec)
# first, read trial data and extract marker trajectories. group by marker
# and source cube.
data = {}
for trial in lmj.cubes.Experiment(root).trials_matching(pattern):
trial.load()
targets = trial
move = trial.movement_to(target)
move.df.index = pd.Index(np.arange(1 - len(move.df), 1))
source = move.df.source.iloc[0]
for marker in MARKERS.split():
data.setdefault((marker, source), []).append(move.trajectory(marker))
# next, compute mean and stderr for every time step approaching targets.
# group again by marker and source cube.
agg = {}
for key, dfs in data.items():
keys = list(range(len(dfs)))
merged = pd.concat(dfs, axis=1, keys=keys).groupby(axis=1, level=1)
agg[key] = merged.mean(), merged.std() / np.sqrt(merged.size())
with lmj.plot.axes(spines=True) as ax:
ts = np.arange(-num_frames, 0)
for marker in reversed(MARKERS.split()):
for (m, s), (mean, stderr) in agg.items():
if m == marker:
ax.plot(ts, stderr.sum(axis=1)[-num_frames:], color=COLORS[marker])
ax.set_xlim(-num_frames, 0)
ax.set_xticks(np.linspace(-num_frames, 0, 5))
ax.set_xticklabels(np.linspace(-num_frames / FRAME_RATE, 0, 5))
ax.set_xlabel('Time Before Touch (sec)')
ax.set_ylabel('Summed Standard Error')
def render(ax):
lmj.cubes.plots.show_cubes(ax, targets, target_num=target)
for (marker, source), (mean, stderr) in agg.items():
mx, my, mz = mean.x, mean.y, mean.z
sx, sy, sz = stderr.x, stderr.y, stderr.z
for t in np.linspace(0, num_frames, 7).astype(int):
x, y, z = lmj.cubes.plots.ellipsoid(
[mx[-t], my[-t], mz[-t]],
[sx[-t], sy[-t], sz[-t]])
ax.plot_wireframe(x, z, y, color=COLORS[marker], alpha=0.3, lw=1)
if animate:
lmj.plot.rotate_3d(
lmj.cubes.plots.show_3d(render),
output=output,
azim=(0, 90),
elev=(5, 20),
fig=dict(figsize=(10, 4.8)))
if not output:
lmj.plot.show()
else:
with lmj.plot.axes3d() as ax:
render(ax)
lmj.cubes.plots.show_3d(lambda ax: None)(ax)
ax.view_init(elev=15, azim=-110)
if __name__ == '__main__':
climate.call(main)
| mit |
nschmidtALICE/AliPhysics | PWGPP/FieldParam/fitsol.py | 39 | 8343 | #!/usr/bin/env python
debug = True # enable trace
def trace(x):
global debug
if debug: print(x)
trace("loading...")
from functools import reduce  # builtin on Python 2, but must be imported on Python 3
from itertools import combinations, combinations_with_replacement
from glob import glob
from math import *
import operator
from os.path import basename
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.feature_selection
import datetime
def prec_from_pathname(path):
if '2k' in path: return 0.002
elif '5k' in path: return 0.005
else: raise AssertionError('Unknown field strength: %s' % path)
# ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...]
def combinatrial_vars(vars_str='xyz', length=3):
term_list = []
for l in range(length):
term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)])
return term_list
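# Worked example of the helper above (deterministic, follows from the ordering of
# combinations_with_replacement):
#   combinatrial_vars('xy', length=2) -> ['x', 'y', 'xx', 'xy', 'yy']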
# product :: Num a => [a] -> a
def product(xs):
return reduce(operator.mul, xs, 1) # foldl in Haskell
# (XYZ, "xx") -> XX
def term(dataframe, vars_str):
return product(map(lambda x: dataframe[x], list(vars_str)))
# (f(X), Y) -> (max deviation, max%, avg dev, avg%)
def deviation_stat(fX, Y, prec=0.005):
dev = np.abs(fX - Y)
(max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0))
(max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100)
return (max_dev, max_pct, avg_dev, avg_pct)
# IO Df
def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]):
sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz']
df = pd.read_csv(path, sep=' ', names=sample_cols)
if cylindrical_axis:
df['r'] = np.sqrt(df.x**2 + df.y**2)
df['p'] = np.arctan2(df.y, df.x)
df['Bt'] = np.sqrt(df.Bx**2 + df.By**2)
df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x)
df['Br'] = df.Bt * np.cos(df.Bpsi)
df['Bp'] = df.Bt * np.sin(df.Bpsi)
if absolute_axis:
df['X'] = np.abs(df.x)
df['Y'] = np.abs(df.y)
df['Z'] = np.abs(df.z)
for var in genvars:
df[var] = term(df, var)
return df
def choose(vars, df1, df2):
X1 = df1.loc[:, vars].as_matrix()
X2 = df2.loc[:, vars].as_matrix()
return (X1, X2)
# IO ()
def run_analysis_for_all_fields():
sample_set = glob("dat_z22/*2k*.sample.dat")
test_set = glob("dat_z22/*2k*.test.dat")
#print(sample_set, test_set)
assert(len(sample_set) == len(test_set) and len(sample_set) > 0)
result = pd.DataFrame()
for i, sample_file in enumerate(sample_set):
trace("run_analysis('%s', '%s')" % (sample_file, test_set[i]))
df = run_analysis(sample_file, test_set[i])
result = result.append(df, ignore_index=True)
write_header(result)
def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat',
test_file = 'dat_z22/tpc2k-z0-q2.test.dat'):
global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result
precision = prec_from_pathname(sample_file)
assert(precision == prec_from_pathname(test_file))
xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z up to 3 dims
trace("reading training samples... " + sample_file)
df = load_samples(sample_file, genvars=xvars_full)
trace("reading test samples..." + test_file)
test = load_samples(test_file, genvars=xvars_full)
trace("linear regression fit...")
lr = sklearn.linear_model.LinearRegression()
#ri = sklearn.linear_model.RidgeCV()
#la = sklearn.linear_model.LassoCV()
fs = sklearn.feature_selection.RFE(lr, 1, verbose=0)
#xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz']
#xvars = ["xx", "yy", "zz", 'x', 'y', 'z', 'xzz', 'yzz']
#xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr']
#xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz']
yvars = ['Bx', 'By', 'Bz']
#yvars = ['Bz']
(Y, Ytest) = choose(yvars, df, test)
#(Y, Ytest) = (df['Bz'], test['Bz'])
xvars = combinatrial_vars('xyz', 3) # use all terms up to 3rd power
(X, Xtest) = choose(xvars, df, test)
for y in yvars:
fs.fit(X, df[y])
res = pd.DataFrame({ "term": xvars, "rank": fs.ranking_ })
trace(y)
trace(res.sort_values(by = "rank"))
#xvars=list(res.sort_values(by="rank")[:26]['term'])
lr.fit(X, Y)
trace(', '.join(yvars) + " = 1 + " + ' + '.join(xvars))
test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#for i in range(len(yvars)):
# arr = [lr.intercept_[i]] + lr.coef_[i]
# arr = [ str(x) for x in arr ]
# print(yvars[i] + " = { " + ', '.join(arr) + " }")
# print("deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
# ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] ))
(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
trace("linear regression R^2 [train data]: %.8f" % sample_score)
trace("linear regression R^2 [test data] : %.8f" % test_score)
return pd.DataFrame(
{ "xvars": [xvars],
"yvars": [yvars],
"max_dev": [test_dev[0]],
"max%": [test_dev[1]],
"avg_dev": [test_dev[2]],
"avg%": [test_dev[3]],
"sample_score": [sample_score],
"score": [test_score],
"coeffs": [lr.coef_],
"intercept": [lr.intercept_],
"sample_file": [sample_file],
"test_file": [test_file],
"precision": [precision],
"volume_id": [volume_id_from_path(sample_file)]
})
def volume_id_from_path(path):
return basename(path)\
.replace('.sample.dat', '')\
.replace('-', '_')
def get_location_by_volume_id(id):
if 'its' in id: r_bin = 0
if 'tpc' in id: r_bin = 1
if 'tof' in id: r_bin = 2
if 'tofext' in id: r_bin = 3
if 'cal' in id: r_bin = 4
z_bin = int(id.split('_')[1][1:]) # "tofext2k_z0_q4" -> 0
if 'q1' in id: quadrant = 0
if 'q2' in id: quadrant = 1
if 'q3' in id: quadrant = 2
if 'q4' in id: quadrant = 3
return r_bin, z_bin, quadrant
def write_header(result):
#result.to_csv("magfield_params.csv")
#result.to_html("magfield_params.html")
print("# This file was generated from sysid.py at " + str(datetime.datetime.today()))
print("# " + ', '.join(result.iloc[0].yvars) + " = 1 + " + ' + '.join(result.iloc[0].xvars))
print("# barrel r: 0 < its < 80 < tpc < 250 < tof < 400 < tofext < 423 < cal < 500")
print("# barrel z: -550 < z < 550")
print("# phi: 0 < q1 < 0.5pi < q2 < pi < q3 < 1.5pi < q4 < 2pi")
print("# header: Rbin Zbin Quadrant Nval_per_compoment(=20)")
print("# data: Nval_per_compoment x floats")
#print("# R^2: coefficient of determination in multiple linear regression. [0,1]")
print("")
for index, row in result.iterrows():
#print("// ** %s - R^2 %s" % (row.volume_id, row.score))
print("#" + row.volume_id)
r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id)
print("%s %s %s 20" % (r_bin, z_bin, quadrant))
for i, yvar in enumerate(row.yvars):
name = row.volume_id #+ '_' + yvar.lower()
print("# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
(row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i]))
coef = [row['intercept'][i]] + list(row['coeffs'][i])
arr = [ "%.5e" % x for x in coef ]
body = ' '.join(arr)
#decl = "const double[] %s = { %s };\n" % (name, body)
#print(decl)
print(body)
print("")
#write_header(run_analysis())
run_analysis_for_all_fields()
#for i in range(10):
# for xvars in combinations(xvars_full, i+1):
#(X, Xtest) = choose(xvars, df, test)
#lr.fit(X, Y)
#ri.fit(X, Y)
#la.fit(X, Y)
#fs.fit(X, Y)
#print xvars
#(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
#print("linear R^2[sample] %.8f" % sample_score)
#print("linear R^2[test] %.8f" % test_score)
#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest))
#print("lasso R^2[sample] %.8f" % sample_score2)
#print("lasso R^2[test] %.8f" % test_score2)
#print(la.coef_)
#for i in range(len(yvars)):
# print(yvars[i])
# print(pd.DataFrame({"Name": xvars, "Params": lr.coef_[i]}).sort_values(by='Params'))
# print("+ %e" % lr.intercept_[i])
#sample_dev = deviation_stat(lr.predict(X), Y, prec=precision)
#test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision)
#print("[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % sample_dev)
#print("[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev )
#print("lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev2 )
| bsd-3-clause |
oysstu/pyopencl-in-action | ch14/rdft.py | 1 | 3847 | """
Listing 14.1: The discrete Fourier transform (for real numbers)
"""
import numpy as np
import pyopencl as cl
import utility
import matplotlib.pyplot as plt
NUM_POINTS = 2 ** 8
kernel_src = '''
__kernel void rdft(__global float *x) {
int N = (get_global_size(0)-1)*2;
int num_vectors = N/4;
float X_real = 0.0f;
float X_imag = 0.0f;
float4 input, arg, w_real, w_imag;
float two_pi_k_over_N =
2*M_PI_F*get_global_id(0)/N;
for(int i=0; i<num_vectors; i++) {
arg = (float4) (two_pi_k_over_N*(i*4),
two_pi_k_over_N*(i*4+1),
two_pi_k_over_N*(i*4+2),
two_pi_k_over_N*(i*4+3));
w_real = cos(arg);
w_imag = sin(arg);
input = vload4(i, x);
X_real += dot(input, w_real);
X_imag -= dot(input, w_imag);
}
barrier(CLK_GLOBAL_MEM_FENCE);
if(get_global_id(0) == 0) {
x[0] = X_real;
}
else if(get_global_id(0) == get_global_size(0)-1) {
x[1] = X_real;
}
else {
x[get_global_id(0) * 2] = X_real;
x[get_global_id(0) * 2 + 1] = X_imag;
}
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev])
queue = cl.CommandQueue(context, dev)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev])
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Determine maximum work-group size
wg_max_size = dev.max_work_group_size
# Data and device buffers
input_data = np.zeros(shape=(NUM_POINTS,), dtype=np.float32)
output_data = np.empty_like(input_data, dtype=np.float32)
# Initialize data with a rectangle function
input_data[:NUM_POINTS // 4] = 1.0
mf = cl.mem_flags
data_buffer = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=input_data)
# Execute kernel and copy result
# rdft(__global float *x)
global_size = (NUM_POINTS // 2 + 1,)
local_size = None
prog.rdft(queue, global_size, local_size, data_buffer)
cl.enqueue_copy(queue, dest=output_data, src=data_buffer, is_blocking=True)
# Change to array of complex values
f_first = np.array([output_data[0] + 0j])  # output_data[0] holds X[0], the DC bin (purely real)
f_last = np.array([output_data[1] + 0j])  # output_data[1] holds X[N/2], the Nyquist bin (also purely real)
cl_fft = output_data[2::2] + 1j * output_data[3::2]  # remaining slots alternate real and imaginary parts of X[1..N/2-1]
# The final result is assembled by concatenating f0, f1 : fN/2-1, fN/2 and the conjugate of f1:fN/2-1
cl_fft = np.concatenate((f_first, cl_fft, f_last, np.conj(cl_fft[::-1])))
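# Why this reconstruction works: for a purely real input of length N the DFT is
# Hermitian-symmetric, X[N-k] == conj(X[k]), so the kernel only computes bins
# 0..N/2 and the upper half is rebuilt here by conjugating X[1..N/2-1] in reverse.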
np_fft = np.fft.fft(input_data)
# Print first ten complex values
np.set_printoptions(precision=4, suppress=True)
print('CL FFT [0:10]:')
print(cl_fft[:10])
print('\nNumpy FFT [0:10]:')
print(np_fft[:10])
# Visualize result
cl_magnitude = np.absolute(cl_fft)
np_magnitude = np.absolute(np_fft)
# Before calculating the phase, frequencies of low magnitude should be set to zero
# This is due to numerical inaccuracies
cl_fft[cl_magnitude < 0.0001] = 0.0 + 0.0j
cl_phase = np.angle(cl_fft)
np_phase = np.angle(np_fft)
k = np.arange(0, NUM_POINTS)
f, axes = plt.subplots(4, sharex=True)
axes[0].set_title('Re')
axes[0].plot(k, np.real(cl_fft), label='OpenCL')
axes[0].plot(k, np.real(np_fft), label='Numpy')
axes[0].legend()
axes[1].set_title('Im')
axes[1].plot(k, np.imag(cl_fft), label='OpenCL')
axes[1].plot(k, np.imag(np_fft), label='Numpy')
axes[2].set_title('Magnitude')
axes[2].plot(k, cl_magnitude, label='OpenCL')
axes[2].plot(k, np_magnitude, label='Numpy')
axes[3].set_title('Phase')
axes[3].plot(k, cl_phase, label='OpenCL')
axes[3].plot(k, np_phase, label='Numpy')
[ax.locator_params(nbins=2, axis='y') for ax in axes]
plt.xlim([0, NUM_POINTS-1])
plt.show()
| mit |
nagyistoce/deep_nets_iclr04 | lib/common_imports.py | 3 | 1562 | import random
import theano
import theano.tensor as T
import numpy
import scipy
import scipy.misc as misc
import scipy.io as io
import scipy.ndimage.filters as filters
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import math
import re
import time
import sys, os
import xml.etree.ElementTree as ET
import cPickle as pickle
import glob
from collections import defaultdict
# My own libs
import tools
import alfileformat as al
from layer_blocks import ConvPoolLayer, OutputLayer, InputLayerSW, \
SoftMaxLayer, DropoutLayer
class Machine(object):
layer_map = {
'InputLayerSW': InputLayerSW,
'ConvPoolLayer' : ConvPoolLayer,
'OutputLayer' : OutputLayer,
'SoftMaxLayer': SoftMaxLayer,
'DropoutLayer' : DropoutLayer,
}
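# Sketch of the command-line override syntax parsed in __init__ below
# (script and parameter names are illustrative only):
#   python some_train_script.py conf.xml 'learning_rate=0.01(float),num_epochs=20(int)'
# i.e. argv[2] is a comma-separated list of key=value(type) pairs with type in
# {int, bool, str, float}; each pair is set as an attribute on params. Note that
# bool('False') is truthy in Python, so bool overrides effectively act as on-flags.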
def __init__(self, params):
# argv[1] is the conf xml file
# set extra / overwrite params
if len(sys.argv) > 2:
print sys.argv[2];
type_map ={'int':int, 'bool':bool, 'str':str, 'float':float}
test_param_str = sys.argv[2].split(',')
print test_param_str
for t_par in test_param_str:
print t_par
key, val = t_par.split('=')
print key, val
val, type = re.split('\(*\)*', val)[0:2]
print 'val={0}, type={1}'.format(val, type)
val = type_map[type](val)
print 'Setting params.{0} to {1}'.format(key, val)
setattr(params, key, val)
| gpl-2.0 |
aestrivex/mne-python | mne/viz/tests/test_ica.py | 3 | 4784 | # Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
from numpy.testing import assert_raises
from mne import io, read_events, Epochs, read_cov
from mne import pick_types
from mne.utils import run_tests_if_main, requires_sklearn
from mne.viz.utils import _fake_click
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.2
def _get_raw(preload=False):
return io.Raw(raw_fname, preload=preload)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return [0, 1, 2, 6, 7, 8, 12, 13, 14] # take a only few channels
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
return epochs
@requires_sklearn
def test_plot_ica_components():
"""Test plotting of ICA solutions
"""
import matplotlib.pyplot as plt
raw = _get_raw()
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica_picks = _get_picks(raw)
ica.fit(raw, picks=ica_picks)
warnings.simplefilter('always', UserWarning)
with warnings.catch_warnings(record=True):
for components in [0, [0], [0, 1], [0, 1] * 2, None]:
ica.plot_components(components, image_interp='bilinear', res=16)
ica.info = None
assert_raises(RuntimeError, ica.plot_components, 1)
plt.close('all')
@requires_sklearn
def test_plot_ica_sources():
"""Test plotting of ICA panel
"""
import matplotlib.pyplot as plt
raw = io.Raw(raw_fname, preload=False)
raw.crop(0, 1, copy=False)
raw.preload_data()
picks = _get_picks(raw)
epochs = _get_epochs()
raw.pick_channels([raw.ch_names[k] for k in picks])
ica_picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=ica_picks)
raw.info['bads'] = ['MEG 0113']
assert_raises(RuntimeError, ica.plot_sources, inst=raw)
ica.plot_sources(epochs)
epochs.info['bads'] = ['MEG 0113']
assert_raises(RuntimeError, ica.plot_sources, inst=epochs)
epochs.info['bads'] = []
with warnings.catch_warnings(record=True): # no labeled objects mpl
ica.plot_sources(epochs.average())
evoked = epochs.average()
fig = ica.plot_sources(evoked)
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded
ica.plot_sources(evoked, exclude=[0])
ica.exclude = [0]
ica.plot_sources(evoked) # does the same thing
assert_raises(ValueError, ica.plot_sources, 'meeow')
plt.close('all')
@requires_sklearn
def test_plot_ica_overlay():
"""Test plotting of ICA cleaning
"""
import matplotlib.pyplot as plt
raw = _get_raw(preload=True)
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=picks)
# don't test raw, needs preload ...
ecg_epochs = create_ecg_epochs(raw, picks=picks)
ica.plot_overlay(ecg_epochs.average())
eog_epochs = create_eog_epochs(raw, picks=picks)
ica.plot_overlay(eog_epochs.average())
assert_raises(ValueError, ica.plot_overlay, raw[:2, :3][0])
ica.plot_overlay(raw)
plt.close('all')
@requires_sklearn
def test_plot_ica_scores():
"""Test plotting of ICA scores
"""
import matplotlib.pyplot as plt
raw = _get_raw()
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=picks)
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1])
assert_raises(ValueError, ica.plot_scores, [0.2])
plt.close('all')
run_tests_if_main()
| bsd-3-clause |
ronaldahmed/SLAM-for-ugv | neural-navigation-with-lstm/utils.py | 2 | 20861 | import numpy as np
import ipdb
import os,sys,re
from nltk import word_tokenize
from six.moves import cPickle as pickle
from sklearn.cross_validation import train_test_split
from collections import Counter
from MARCO.POMDP.MarkovLoc_Grid import getMapGrid
from MARCO.POMDP.MarkovLoc_Jelly import getMapJelly
from MARCO.POMDP.MarkovLoc_L import getMapL
from MARCO.Robot.Meanings import Wall,End,Empty
#######################################################################################################
data_dir = 'data/'
SEED = 42
np.random.seed(SEED)
path_patt = re.compile(r'\d+,\s*\d+,\s*[-]*\d+')
# actions
FW = 0
L = 1
R = 2
STOP = 3
PAD_decode = 4
actions_str = [
"FORWARD",
"LEFT",
"RIGHT",
"STOP",
"<PAD>",
]
num_actions = len(actions_str)
forward_step = 1 # 1 cell
rotation_step = 90 # degrees
# Special indicator for sequence padding
EOS = '<EOS>'
PAD = '<PAD>'
RARE = '<RARE>'
#######################################################################################################
class Sample(object):
def __init__(self,_inst=[],_actions=[],path=[],_id='',sP=-1,eP=-1,_map_name=''):
self._instructions = _inst
self._actions = _actions
self._id = _id
# path: sequence of position states (x,y,th)
self._path = path
# start and end position (id_localization) | global (multisentence) start and end
self._startPos = sP
self._endPos = eP
self._map_name = _map_name
def __repr__(self):
res = ("{ instructions:\n"+
" "+str(self._instructions) + '\n'
" actions:\n"+
" "+str(verbose_actions(self._actions)) + '\n'
" path:\n"+
" "+str(self._path)+' }')
return res
class MapData:
def __init__(self,_map_name,_map):
# can be Grid, Jelly or L
self.name = _map_name.lower()
# map object
self.map = _map
# format: [Sample(x,y,sample_id)]
self.samples = []
def add_sample(self,_instructions,_actions,_path,_id,sP,eP,map_name):
# add new sample (nav_instructions,actions,sample_id)
# input: instructions, actions, path, sample_id, start_pos, end_pos, map_name
self.samples.append( Sample(_instructions,_actions,_path,_id,sP,eP,map_name) )
def get_multi_sentence_samples(self):
# return: [[Sample], [Sample]]
ms_sample_list = []
prev_id = self.samples[0]._id
n_sam = len(self.samples)
ms_sample = []
for i in xrange(n_sam):
if self.samples[i]._id != prev_id:
ms_sample_list.append(ms_sample)
ms_sample = [self.samples[i]]
else:
ms_sample.append(self.samples[i])
prev_id = self.samples[i]._id
# add last batch
ms_sample_list.append(ms_sample)
return ms_sample_list
def verbose_actions(actions):
#print string command for each action
return [actions_str[act_id] for act_id in actions]
def get_actions_and_path(path_text,_map):
"""
Extract action and path seq from raw string in data (FW(x,y,th);L(x,y,th)...)
"""
list_pre_act = path_text.split(';')
n_act = len(list_pre_act)
actions = []
path = []
for i in xrange(n_act):
x,y,th = -1,-1,-1
id_act = -1
if i==n_act-1:
str_action = list_pre_act[i].strip('(').strip(')').split(',')
x,y,th = [int(comp.strip()) for comp in str_action]
id_act = STOP
else:
prx = list_pre_act[i].find('(')
id_act = actions_str.index(list_pre_act[i][:prx])
x,y,th = [int(comp.strip()) for comp in list_pre_act[i][prx+1:-1].split(',')]
pose = _map.platdir2orient(th)
xg,yg = _map.locations[ _map.plat2place(x,y) ]
if xg < 1 or yg < 1:
print("Map: ",_map.name)
print(" xp,yp: ", x,y)
print(" xg,yg: ", xg,yg)
print("="*30)
ipdb.set_trace()
path.append( (xg,yg,pose) )
actions.append(id_act)
return actions,path
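# Illustrative raw path string for the parser above (values are made up):
#   "FW(1,1,0);R(1,2,0);(1,2,3)"  ->  actions [FW, R, STOP]
# with each platform-frame (x, y, th) converted to a global (xg, yg, pose) tuple
# through _map.plat2place/_map.locations and _map.platdir2orient.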
"""
Read single and multiple sentence instructions
return: {map_name : MapData object [with data in 'samples' attribute]}
"""
def get_data():
map_data = {
'grid' : MapData("grid" ,getMapGrid()),
'jelly' : MapData("jelly",getMapJelly()),
'l' : MapData("l",getMapL())
}
for map_name, data_obj in map_data.items():
filename = map_name + '.settrc'
sample_id = ''
flag_toggle = False
toggle = 0
actions = path = tokens = []
start_pos = end_pos = -1
for line in open( os.path.join(data_dir,filename) ):
line=line.strip("\n")
if line=='':
#ipdb.set_trace()
# reset variables
flag_toggle = False
toggle = 0
actions = path = tokens = []
start_pos = end_pos = -1
sample_id = ''
continue
if line.startswith("Cleaned-"):
prex = "Cleaned-"
sample_id = line[len(prex):]
if line.find('map=')!=-1:
# ignore line: y=... map=... x=...
flag_toggle=True
temp = line.split('\t')
start_pos = int(temp[0][2:]) # y=...
end_pos = int(temp[-1][2:]) # x=...
continue
if flag_toggle:
if toggle==0:
# read instructions
tokens = word_tokenize(line)
else:
# read actions and path
actions,path = get_actions_and_path(line,data_obj.map)
# save new single-sentence sample
data_obj.add_sample(tokens, actions, path, sample_id, start_pos, end_pos, map_name)
# reset variables
actions = path = tokens = []
toggle = (toggle+1)%2
#END-IF-TOGGLE
#END-FOR-READ-FILE
#END-FOR-MAPS
return map_data
##########################################################################################
##########################################################################################
class Fold:
def __init__(self,train_set,val_set,test_set,test_multi_set,vocab):
self.train_data = train_set
self.valid_data = val_set
self.test_single_data = test_set
self.test_multi_data = test_multi_set
self.vocabulary = vocab
self.vocabulary_size = len(vocab)
"""
Shuffles and splits data into train, val, and test sets for each fold configuration
Each fold is one-map-out conf with (train:0.9,val:0.1)
return: [fold0,fold1,fold2]
"""
def get_folds_vDev(dir='data/', val=0.1, force=False):
pickle_file = 'folds_vDev.pickle'
filename = os.path.join(dir,pickle_file)
folds = []
if force or not os.path.exists(filename):
# Make pickle object
dataByMap = get_data()
map_names = dataByMap.keys()
n_names = len(map_names)
# Iteration over folds
for i in range(n_names):
# reset arrays
train_set = []
valid_set = []
complete_set = [] # for universal vocab
#
test_single_set = dataByMap[map_names[i]].samples
test_multi_set = dataByMap[map_names[i]].get_multi_sentence_samples()
for j in range(n_names):
if j != i:
# shuffle data before splitting
data = np.array(dataByMap[map_names[j]].samples) # shuffle a separate array to preserve the original order for multi-sentence building
np.random.shuffle(data)
# split into training and validation sets
train_samples,valid_samples = train_test_split( data,
test_size=val,
random_state = SEED)
train_set.extend(train_samples)
valid_set.extend(valid_samples)
complete_set.extend(data)
# Reformat to word index
#vocabulary = getVocabulary(train_set)
vocabulary = getVocabulary(complete_set) # universal vocabulary
train_set = reformat_wordid(train_set ,vocabulary)
valid_set = reformat_wordid(valid_set ,vocabulary)
test_single_set = reformat_wordid(test_single_set,vocabulary)
# for multi sentences
temp = []
for parag in test_multi_set:
temp.append(reformat_wordid(parag,vocabulary))
test_multi_set = temp
# shuffle between maps
np.random.shuffle(train_set)
np.random.shuffle(valid_set)
np.random.shuffle(test_single_set)
np.random.shuffle(test_multi_set)
#END-FOR-TRAIN-VAL-SPLIT
folds.append( Fold(train_set,valid_set,test_single_set,test_multi_set,vocabulary) )
#END-FOR-FOLDS
print('Pickling %s.' % filename)
try:
with open(filename, 'wb') as f:
pickle.dump(folds, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', filename, ':', e)
else:
with open(filename, 'rb') as f:
folds = pickle.load(f)
print('%s read from pickle...' % filename)
return folds
"""
Shuffles and splits data in train and test sets for each fold conf
Each fold is one-map-out conf with (train:1.0)
Test set is for iteration stopping criteria
return: [fold0,fold1,fold2]
"""
def get_folds_vTest(dir='data/', val=0.1, force=False):
pickle_file = 'folds_vTest.pickle'
filename = os.path.join(dir,pickle_file)
folds = []
if force or not os.path.exists(filename):
# Make pickle object
dataByMap = get_data()
map_names = dataByMap.keys()
n_names = len(map_names)
# Iteration over folds
for i in range(n_names):
# reset arrays
train_set = []
valid_set = []
#
test_single_set = dataByMap[map_names[i]].samples
test_multi_set = dataByMap[map_names[i]].get_multi_sentence_samples()
for j in range(n_names):
if j != i:
train_set.extend(dataByMap[map_names[j]].samples)
# Reformat to word index
vocabulary = getVocabulary(train_set)
train_set = reformat_wordid(train_set ,vocabulary)
valid_set = reformat_wordid(valid_set ,vocabulary)
test_single_set = reformat_wordid(test_single_set,vocabulary)
# for multi sentences
temp = []
for parag in test_multi_set:
temp.append(reformat_wordid(parag,vocabulary))
test_multi_set = temp
# shuffle between maps
np.random.shuffle(train_set)
np.random.shuffle(valid_set)
np.random.shuffle(test_single_set)
np.random.shuffle(test_multi_set)
#END-FOR-TRAIN-VAL-SPLIT
folds.append( Fold(train_set,valid_set,test_single_set,test_multi_set,vocabulary) )
#END-FOR-FOLDS
print('Pickling %s.' % filename)
try:
with open(filename, 'wb') as f:
pickle.dump(folds, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', filename, ':', e)
else:
with open(filename, 'rb') as f:
folds = pickle.load(f)
print('%s read from pickle...' % filename)
return folds
##########################################################################################
##########################################################################################
def getVocabulary(data):
vocab = Counter()
for sample in data:
vocab.update(sample._instructions)
frequency_threshold = 0 # > THR
vocab = [w for w,f in vocab.items() if f>frequency_threshold]
vocab.append(EOS)
vocab.append(PAD)
vocab.append(RARE)
vocab_dict = dict( zip(vocab,xrange(len(vocab))) )
return vocab_dict
def reformat_wordid(data,vocab):
# data: [Sample]
wordid_data = []
for sample in data:
instructions = sample._instructions
ref_ins = []
for w in instructions:
ref_ins.append( vocab[w] if w in vocab else vocab[RARE] )
ref_ins.append(vocab[EOS]) # add end of sentence token
new_sample = Sample( ref_ins,
sample._actions,
sample._path,
sample._id,
sample._startPos,sample._endPos,
sample._map_name)
wordid_data.append(new_sample)
return wordid_data
##########################################################################################
##########################################################################################
class BatchGenerator:
def __init__(self,data,batch_size,vocab):
self._encoder_unrollings = 49 # experimental
self._decoder_unrollings = 31 # experimental
self._data = data # format: [Sample(word_id format)]
self._data_size = len(data)
self._batch_size = batch_size
self._vocabulary = vocab
self._id2word = dict( zip(vocab.values(),vocab.keys()) )
self._vocabulary_size = len(vocab)
# batch splitting vars
segment = self._data_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)] # one cursor per data segment; each yields one batch element at a time
def get_batch(self):
# Encoder and decoder batches in one-hot format [n_unroll,batch_size,{vocab_size,num_actions}]
encoder_batch = [np.zeros(shape=(self._batch_size, self._vocabulary_size), dtype=np.float)
for _ in xrange(self._encoder_unrollings)]
decoder_batch = [PAD_decode * np.ones(self._batch_size, dtype=np.int32)
for _ in xrange(self._decoder_unrollings)] # list of action_ids, not one-hot repr
sample_batch = [0 for _ in xrange(self._batch_size)]
for b in xrange(self._batch_size):
encoder_seq = self._data[ self._cursor[b] ]._instructions
decoder_seq = self._data[ self._cursor[b] ]._actions
# save original sample in batch
sample_batch[b] = self._data[ self._cursor[b] ]
# One-hot formatting for ENCODER
for roll in xrange(len(encoder_seq)):
encoder_batch[roll][b,encoder_seq[roll]] = 1.0
# ZERO PADDING: if outside len of enc, leave empty (0s)
## Action_id formatting for DECODER
for i,act in enumerate(decoder_seq):
decoder_batch[i][b] = act
# update cursor for current segment
self._cursor[b] = (self._cursor[b] + 1) % self._data_size
#END-FOR-BATCH-SEGMENT
decoder_batch = np.array(decoder_batch)
return encoder_batch,decoder_batch,sample_batch
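# Shapes produced above: encoder_batch is a list of encoder_unrollings arrays,
# each [batch_size, vocabulary_size] with one-hot rows (all-zero past the end of
# a sentence); decoder_batch is an int array [decoder_unrollings, batch_size]
# holding action ids, pre-filled with PAD_decode where a sequence has ended.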
def get_one_sample(self):
# SGD instead of mini-batch, one sample at a time
encoder_input = []
encoder_seq = self._data[ self._cursor[0] ]._instructions
decoder_seq = self._data[ self._cursor[0] ]._actions
for i in xrange(len(encoder_seq)):
roll = np.zeros(shape=(1,self._vocabulary_size),dtype=np.float)
roll[0,encoder_seq[i]] = 1.0
encoder_input.append(roll)
sample = self._data[ self._cursor[0] ]
self._cursor[0] = (self._cursor[0] + 1) % self._data_size
return encoder_input,decoder_seq,[sample]
def batch2string(self,encoder_batch,decoder_batch):
for b in xrange(self._batch_size):
print("Batch:",b)
print("-"*70)
print(" encoder: ",[self._id2word[encoder_batch[i][b,:].argmax()]
if sum(encoder_batch[i][b,:])>0 else PAD
for i in range(self._encoder_unrollings)])
print(" decoder: ",[ actions_str[decoder_batch[i][b,:].argmax()]
if sum(decoder_batch[i][b,:])>0 else PAD
for i in range(self._decoder_unrollings)])
print("="*70)
#END-FOR-BATCH_SIZE
##########################################################################################
def get_landmark_set(_map):
# Iterates over all locations (intersections and ends) of map, extracting landmarks from map (context festures)
# return dict {Meaning: id}
feats = set()
for loc in xrange(1,_map.NumPlaces):
for angle in xrange(_map.NumPoses):
views = _map.getView((loc,angle))
views = views[0][0]
[feats.add(feat) for view in views for feat in view if feat!=End and feat!=Empty] # End and Empty are not in the dictionary
n_feats = len(feats)
feat_dict = dict( zip(list(feats),xrange(n_feats)) )
return feat_dict
def get_objects_set(_map):
mid_objects = set()
for loc in xrange(1,_map.NumPlaces):
for angle in xrange(_map.NumPoses):
views = _map.getView((loc,angle))
views = views[0][0]
if views[0][1]!=Empty:
mid_objects.add(views[0][1]) #mid non-empty
n_feats = len(mid_objects)
feat_dict = dict( zip(list(mid_objects),xrange(n_feats)) )
return feat_dict
def get_world_context_id(_map,place,pose):
"""
get bag-of-words repr of wold context from (place,pose)
return:
featsByPose: [set(Meanings inst. in that direction)] x numPoses -> [fw,rg,bw,lf]
cell_object: Object in current cell (can be Empty)
"""
featsByPose = []
cell_object = Empty
for i in range(_map.NumPoses):
curr_pose = (pose+i)%_map.NumPoses
views = _map.getView((place,curr_pose))[0][0] # format: [(views,prob)]
cell_object = views[0][1] # no problem with overwriting it, bc it's the same in all directions
curr_view = set()
for j,view in enumerate(views):
if j>0:
curr_view.add(view[1]) #only add object if cell is not current
curr_view.add(view[3])
curr_view.add(view[4])
curr_view.add(view[5])
if Empty in curr_view:
curr_view.remove(Empty)
if End in curr_view: # if End is found, replace with wall
curr_view.remove(End)
curr_view.add(Wall)
featsByPose.append(curr_view)
return featsByPose,cell_object
def get_sparse_world_context(_map,place,pose,feature_dict,object_dict):
num_feats = len(feature_dict)
num_objects = len(object_dict)
y_t = np.zeros(shape=(1,num_objects + 4*num_feats),dtype=np.float32)
featsByPose,cell_object = get_world_context_id(_map,place,pose)
# add features for every direction
for i,features in enumerate(featsByPose):
ids = [feature_dict[feat] + i*num_feats for feat in features]
y_t[0,ids] = 1.0
# add object in current cell, if any
if cell_object != Empty:
y_t[0,4*num_feats+object_dict[cell_object]] = 1.0
return y_t
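# Layout of y_t as filled above: four blocks of num_feats columns, one per
# relative direction (forward, right, backward, left), followed by num_objects
# columns one-hot encoding the object in the current cell (all zero if Empty).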
def get_batch_world_context(sample_batch_roll,_t,_maps,feature_dict,object_dict,batch_size):
"""
args:
sample_batch_roll: placeholder of shape [batch_size,1] with Sample obj data for ONE roll
_t: time step (for path indexing)
_maps: {map_name: Map object}
feature_dict: {Meaning: id} mapping for directional map features
object_dict: {Meaning: id} mapping for objects found in cells
batch_size: number of samples in the batch
return : world_state vector y [batch_size x (4*num_feats + num_objects)]
"""
num_feats = len(feature_dict)
num_objects = len(object_dict)
roll_y = np.zeros(shape=(batch_size,4*num_feats + num_objects),dtype=np.float32)
for b in xrange(batch_size):
map_name = sample_batch_roll[b]._map_name
if _t < len(sample_batch_roll[b]._path):
# check world state for valid step
x,y,pose = sample_batch_roll[b]._path[_t]
place = _maps[map_name].locationByCoord[(x,y)]
roll_y[b,:] = get_sparse_world_context(_maps[map_name],place,pose,feature_dict,object_dict)
return roll_y
def move(state,action,_map):
"""
state; (xg,yg,pose)
action: action_id
_map: map object form getMap.*
return: (next_loc,next_pos) next position state after applying action
"""
if action==STOP or action==PAD_decode:
return -1
xg,yg,pose = state
if action==FW:
nx = ny = -1
if pose==0:
nx,ny = xg-2,yg
elif pose==1:
nx,ny = xg,yg+2
elif pose==2:
nx,ny = xg+2,yg
else: #pose==3
nx,ny = xg,yg-2
if (nx,ny) in _map.locationByCoord:
return nx,ny,pose
else:
return -1
elif action==L:
return xg,yg,(pose-1)% _map.NumPoses
elif action==R:
return xg,yg,(pose+1)% _map.NumPoses
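# Transition sketch for move() (coordinates are illustrative; validity depends on
# _map.locationByCoord). Poses 0/1/2/3 step in -x/+y/+x/-y respectively, so:
#   move((3, 4, 1), FW, _map)   -> (3, 6, 1) if (3, 6) is on the map, else -1
#   move((3, 4, 1), R, _map)    -> (3, 4, 2)
#   move((3, 4, 1), STOP, _map) -> -1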
##########################################################################################
class BeamS_Node(object):
def __init__(self,_id,logprob,loss,parent,pos_state,dec_st,dec_ct,dist,act_id):
self._id = _id
self._log_prob = logprob
self._loss = loss
self._parent = parent
self._pos_state = pos_state
self._dec_st = dec_st # [st]*num_ensembles
self._dec_ct = dec_ct # [ct]*num_ensembles
self._dist=dist
self._act_id=act_id
class Config(object):
encoder_unrollings = 49
decoder_unrollings = 31
num_actions = 5
max_gradient_norm = 5.0
def __init__(self,batch_size,
vocab_size,
num_nodes=100,
learning_rate=0.1,
learning_rate_decay_factor=0.1,
embedding_world_state_size=30,
dropout_rate=1.0
):
self.vocab_size = vocab_size
self.num_nodes = num_nodes
self.learning_rate = learning_rate
self.learning_rate_decay_factor = learning_rate_decay_factor
self.embedding_world_state_size = embedding_world_state_size
self.dropout_rate = dropout_rate
##########################################################################################
def test_dataset(dataByMap):
for name,mapdata in dataByMap.items():
for data in mapdata.samples:
init_pos = data._path[0]
end_pos = data._path[-1]
# get end pos from actions and init pos
state = prev_state = init_pos
end_followed = []
followed_path = []
for action in data._actions:
prev_state = state
state = move(state,action,mapdata.map)
if state == -1:
end_followed = prev_state
break
followed_path.append(prev_state)
if end_followed==[]:
end_followed = state
followed_path.append(end_followed)
if end_followed != end_pos or data._path!=followed_path:
print("ID: ",data._id)
print("True seq: %s" % (','.join([actions_str[act] for act in data._actions])))
ipdb.set_trace()
##########################################################################################
"""
map_data = get_data()
#ms_data = map_data['jelly'].get_multi_sentence_samples()
input_lens = set()
output_lens = set()
for mname,md in map_data.items():
input_len = set( [len(sample._instructions) for sample in md.samples] )
output_len = set( [len(sample._actions) for sample in md.samples] )
input_lens.update(input_len)
output_lens.update(output_len)
iL = list(input_lens)
oL = list(output_lens)
ipdb.set_trace()
"""
#folds = get_folds_vDev()
#folds_vt = get_folds_vTest()
#batch_gen = BatchGenerator(folds[0].train_data,
# 2,
# folds[0].vocabulary)
#enc,dec,samples = batch_gen.get_batch()
#batch_gen.batch2string(enc,dec)
#ipdb.set_trace()
"""
mg = getMapGrid()
mj = getMapJelly()
ml = getMapL()
ipdb.set_trace()
#state = move((1,2),FW,mm)
ff = get_landmark_set(mj)
od = get_objects_set(mj)
y = get_sparse_world_context(mj,15,0,ff,od)
ipdb.set_trace()
"""
#map_data = get_data()
#test_dataset(map_data) | mit |
jesuscript/topo-mpi | topo/tests/__init__.py | 2 | 8892 | """
Unit tests for Topographica.
Use the 'run' function to run all the tests.
We use unittest and doctest to create tests. The run() function calls
tests in files in topo/tests/ that:
* have a name beginning with 'test' and ending with '.py', if the file
defines the 'suite' attribute;
* have a name beginning with 'test' and ending with '.txt'.
If Tkinter cannot be imported, files that have a name ending with
'_tk' are not imported (hence any tests that they contain are
skipped).
unittest
========
We use unittest in two different ways. The first is simply
to run a series of tests:
class TestSomething(unittest.TestCase):
def setUp(self):
...
def test_x(self):
...
def test_y(self):
...
def extra(self):
...
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSomething))
In the example above, setUp will be called, followed by test_x and
test_y (i.e. the methods setUp and test_* are called automatically);
the extra() method will not be called (unless your code calls it).
setUp does not have to exist.
The second way we use unittest is to pass a series of scenarios
through one battery of tests:
class TestSomething(unittest.TestCase):
def test_x(self):
...
def test_y(self):
...
class TestCase1(TestSomething):
def setUp(self):
...
class TestCase2(TestSomething):
def setUp(self):
...
suite = unittest.TestSuite()
cases = [TestCase1,TestCase2]
suite.addTests([unittest.makeSuite(case) for case in cases])
In this second example, TestCase1.setUp will be called, followed
by test_x and test_y. After this, TestCase2.setUp will be called,
followed again by test_x and test_y. setUp in the two TestCases is
therefore used to create some different data or situations to pass
through the tests.
To be run() automatically, unittest files must (a) be named test*.py, and
(b) must define the 'suite' attribute.
Additionally, unittest files should:
(a) contain the following code to allow the file to be run on its own:
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
(b) set suite.requires_display=True if the tests require a DISPLAY.
(c) call topo.tests.start_tkgui() before running tests (e.g. in
setUP()) if they require the GUI to be running
doctest
=======
Doctest files are plain text files containing examples written as in an
interactive Python session (lines starting with '>>>', followed by the
expected output). Any file in this directory whose name matches test*.txt
is loaded with doctest.DocFileSuite and added to the suite automatically.
$Id$
"""
__version__='$Revision$'
# CEBALERT: It might be good if tests/ were a directory at the top
# level, with a subdirectory structure mirroring that of topo/. Then
# it is more likely we'd have a separate test file for each module,
# and we could also simply name the files the same as what they are
# testing, which could make it simpler to find the right test file.
# CEBALERT: tests often affect each other. Make sure test authors are
# aware of that, and have some kind of policy. (Setting class
# attributes, sharing a sim, etc)
# CEBALERT: some of the test modules are missing code to handle running
# (i.e. running as './topographica topo/tests/testsheet.py').
import unittest,doctest,os,re,fnmatch,socket
import param
# Automatically discover all test*.py files in this directory
__all__ = [re.sub('\.py$','',f)
for f in fnmatch.filter(os.listdir(__path__[0]),'test*.py')]
all_doctest = sorted(fnmatch.filter(os.listdir(__path__[0]),'test*.txt'))
# location in which to create semi-permanent test data
output_path = param.normalize_path.prefix
tests_output_path = os.path.join(output_path,'tests',socket.gethostname())
if not os.path.exists(tests_output_path):
print "Creating %s"%tests_output_path
os.makedirs(tests_output_path)
try:
import Tkinter
except ImportError:
tk_tests = fnmatch.filter(__all__,'*_tk')
tk_doctests = fnmatch.filter(all_doctest,'*_tk')
param.Parameterized().warning('no Tkinter module: skipping %s'%str(tk_tests+tk_doctests))
for t in tk_tests:
__all__.remove(t)
for t in tk_doctests:
all_doctest.remove(t)
try:
import gmpy
gmpy_imported=True
except ImportError:
gmpy_imported=False
if gmpy_imported and gmpy.__file__ is None:
gmpy_imported=False
if not gmpy_imported:
import param
param.Parameterized().warning('no gmpy module: testgmpynumber.txt skipped')
all_doctest.remove('testgmpynumber.txt')
# CEBALERT: we need to rename these/reorganize the tests
__all__.remove('test_script')
__all__.remove('test_map_measurement')
try:
import scikits.audiolab
except ImportError:
import param
param.Parameterized().message("no scikits.audiolab: testaudio.py skipped")
__all__.remove('testaudio')
# CEBALERT: should be using python warnings, and having unittest
# report warnings.
try:
import matplotlib
except ImportError:
import param
param.Parameterized().warning("Matplotlib is not available; skipping Matplotlib tests.")
__all__.remove('testmatplotlib')
__all__.remove('testmatplotlib_tk')
__all__.sort()
def all_suite():
"""
__all__:
For each test module that defines a 'suite' attribute, add its
tests. Only adds tests requiring a display if the DISPLAY
environment variable is set.
all_doctest:
Add each doctest file to the suite.
"""
suite = unittest.TestSuite()
for test_name in __all__:
# import the module
exec 'import '+test_name
test_module = locals()[test_name]
try:
print 'Loading suite from module %s ...' % test_name,
new_test = getattr(test_module,'suite')
if _check_for_display(new_test):
print 'ok.'
suite.addTest(new_test)
else:
print 'skipped: No $DISPLAY.'
except AttributeError,err:
print err
for filename in all_doctest:
print 'Loading doctest file', filename
suite.addTest(doctest.DocFileSuite(filename))
return suite
# Note that this is set up so that if all the tests are run and
# there's no DISPLAY, tests requiring DISPLAY are skipped - but if a
# test is run individually via run_named() and it requires DISPLAY, an
# error will be raised.
def _check_for_display(suite):
"""
Return True if no DISPLAY required or DISPLAY is required and it exists,
otherwise return False.
"""
if not hasattr(suite,'requires_display'):
return True
elif os.getenv('DISPLAY'):
return True
else:
return False
def run(verbosity=1,test_modules=None):
"""
Run tests in all test_modules; test_modules defaults to all_suit().
E.g. to run all tests:
./topographica -c 'from topo.tests import run; run()'
verbosity specifies the level of information printed during the
tests (see unittest.TextTestRunner).
To run only a subset of the tests, specify a list of test modules or doctest
file names. For example:
./topographica -c 'from topo.tests import run, testimage, testsheet; run(test_modules=[testimage,testsheet,"testDynamicParameter.txt"])'
"""
import types
if not test_modules:
run_suite = all_suite()
else:
assert isinstance(test_modules,list), 'test_modules argument must be a list of test modules or doctest filenames.'
run_suite = unittest.TestSuite()
for test_module in test_modules:
if isinstance(test_module,types.ModuleType):
if _check_for_display(test_module.suite):
run_suite.addTest(test_module.suite)
else:
raise Exception("Cannot run test without a valid DISPLAY.")
elif isinstance(test_module,str):
if test_module in all_doctest:
run_suite.addTest(doctest.DocFileSuite(test_module))
else:
raise ValueError, '"%s" is not an available doctest file.' % test_module
else:
raise ValueError, '%s is not a valid test module' % str(test_module)
return unittest.TextTestRunner(verbosity=verbosity).run(run_suite)
# CB: if the unit tests were faster, I wouldn't keep needing this...
def run_named(name,verbosity=2):
"""
Run the named test module.
Convenience function to make it easy to run a single test module.
Examples:
./topographica -c 'import topo.tests; topo.tests.run_named("testsnapshots.py")'
./topographica -c 'import topo.tests; topo.tests.run_named("testDynamicParameter.txt")'
"""
if name.endswith('.py'):
module_name = "topo.tests."+name[0:-3]
import __main__
exec "import %s"%module_name in __main__.__dict__
test_module = eval(module_name,__main__.__dict__)
else:
test_module = name
run(verbosity,test_modules=[test_module])
| bsd-3-clause |
granrothge/neutronpy | doc/conf.py | 3 | 11249 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""Sphinx Documentation builder
"""
from distutils.version import LooseVersion
import glob
import inspect
from os.path import relpath, dirname
import os
import sys
import sphinx
import neutronpy_sphinx_rtd_theme
try:
import neutronpy
except ImportError:
raise RuntimeError(
'Cannot import neutronpy, it must be installed before building documentation. Please investigate.')
if LooseVersion(sphinx.__version__) < LooseVersion('1'):
raise RuntimeError('Need sphinx >= 1 for numpydoc to work correctly')
needs_sphinx = '1.4'
# -----------------------------------------------------------------------------
# releases (changelog) configuration
# -----------------------------------------------------------------------------
# releases_issue_uri = "https://github.com/neutronpy/neutronpy/issues/%s"
# releases_release_uri = "https://github.com/neutronpy/neutronpy/tree/%s"
releases_github_path = "neutronpy/neutronpy"
releases_debug = False
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'matplotlib.sphinxext.plot_directive',
'numpydoc',
'releases',
'nbsphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'neutronpy'
copyright = '2018, David M Fobes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = neutronpy.__version__.split('-')[0]
# The full version, including alpha/beta/rc tags.
release = neutronpy.__version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'autolink'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'neutronpy_sphinx_rtd_theme.support.LightStyle'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
html_theme = "neutronpy_sphinx_rtd_theme"
html_theme_path = [neutronpy_sphinx_rtd_theme.get_html_theme_path()]
html_static_path = [os.path.join('.', '_static')]
html_theme_options = {'logo': 'logo.png',
'logo_name': True,
'logo_text_align': 'center',
'description': "",
'github_user': 'neutronpy',
'github_repo': 'neutronpy',
'travis_button': True,
'github_banner': True,
'link': '#3782BE',
'link_hover': '#3782BE',
'sidebar_includehidden': True}
# Sister-site links to API docs
html_theme_options['extra_nav_links'] = {"NeutronPy Docs": 'http://neutronpy.github.io/reference'}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['about.html',
'navigation.html',
'searchbox.html',
'donate.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {'index': 'indexcontent.html',}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = True
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'neutronpydoc'
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'David M Fobes'
latex_documents = [('reference/index', 'neutronpy-ref.tex', 'NeutronPy Reference', _stdauthor, 'manual')]
# Additional stuff for the LaTeX preamble.
latex_elements = {'preamble': r"""\
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
"""
}
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'neutronpy', 'neutronpy Documentation',
['David M Fobes'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -----------------------------------------------------------------------------
# Texinfo output
# -----------------------------------------------------------------------------
texinfo_documents = [("contents", 'numpy', 'Numpy Documentation', _stdauthor,
'Numpy', "NumPy: array processing for numbers, strings, records, and objects.",
'Programming', 1)]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {'http://docs.python.org/dev': None}
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
# from sphinx.ext import autodoc
# class DocsonlyMethodDocumenter(autodoc.MethodDocumenter):
# def format_args(self):
# return None
#
# autodoc.add_documenter(DocsonlyMethodDocumenter)
numpydoc_show_class_members = False
# autodoc_default_flags = ['members']
autodoc_docstring_signature = True
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(neutronpy.__file__))
if 'dev' in neutronpy.__version__:
return "http://github.com/neutronpy/neutronpy/blob/master/neutronpy/%s%s" % (fn, linespec)
else:
return "http://github.com/neutronpy/neutronpy/blob/v%s/neutronpy/%s%s" % (neutronpy.__version__, fn, linespec)
| mit |
shangwuhencc/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
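    Examples
    --------
    A minimal usage sketch (the shapes shown assume the standard archive and
    these particular parameter values; downloading is skipped here)::
        >>> from sklearn.datasets import fetch_lfw_people
        >>> lfw_people = fetch_lfw_people(min_faces_per_person=70,
        ...                               resize=0.4)  # doctest: +SKIP
        >>> lfw_people.images.shape  # doctest: +SKIP
        (1288, 50, 37)
        >>> lfw_people.data.shape  # doctest: +SKIP
        (1288, 1850)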
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
    # wrap the loader in a memoizing function that will return memmapped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
    target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
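    Examples
    --------
    A minimal usage sketch (shapes correspond to the 'train' subset with the
    default ``slice_`` and ``resize`` values; downloading is skipped here)::
        >>> from sklearn.datasets import fetch_lfw_pairs
        >>> lfw_pairs_train = fetch_lfw_pairs(subset='train')  # doctest: +SKIP
        >>> lfw_pairs_train.pairs.shape  # doctest: +SKIP
        (2200, 2, 62, 47)
        >>> lfw_pairs_train.target.shape  # doctest: +SKIP
        (2200,)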
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
    # wrap the loader in a memoizing function that will return memmapped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
etkirsch/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction
# and its prediction intervals
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval derived
# from the quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/feature_selection/tests/test_base.py | 98 | 3681 | import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
from sklearn.utils.testing import assert_raises, assert_equal
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
timothydmorton/exosyspop | exosyspop/catalog.py | 1 | 4737 | from __future__ import print_function, division
from corner import corner
import pandas as pd
import numpy as np
class Catalog(pd.DataFrame):
_required_columns = ()
def __init__(self, *args, **kwargs):
super(Catalog, self).__init__(*args, **kwargs)
# ensure all required columns are present
missing = []
for c in self._required_columns:
if c not in self.columns:
missing.append(c)
if len(missing) > 0:
raise ValueError('Must contain all required columns! ' +
'({} missing)'.format(missing))
class ObservedCatalog(Catalog):
"""
DataFrame containing observed properties
Must have the following columns:
* primary log10(depth), duration, slope (trapezoidal params)
* secondary log10(depth), duration, slope (trapezoidal params)
* secondary phase
Primary/secondary are defined observationally: that is,
    the deeper (or only) one is called the primary.
"""
_required_columns = ('host', 'period',
'n_pri', 'logd_pri', 'dur_pri', 'slope_pri', 'snr_pri',
'n_sec', 'logd_sec', 'dur_sec', 'slope_sec', 'snr_sec',
'phase_sec')
class SimulatedCatalog(Catalog):
def __init__(self, *args, **kwargs):
super(SimulatedCatalog, self).__init__(*args, **kwargs)
self._observed = None
self._trap_regr = True
def observe(self, efficiency=None):
"""
        The efficiency function is optional; if provided, it must take SNR
        value(s) and return detection probabilities in [0, 1].
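        A sketch of one possible efficiency function (the logistic form, the
        SNR threshold of 7 and the ``sim_catalog`` name are illustrative
        assumptions, not part of this module)::
            >>> import numpy as np
            >>> eff = lambda snr: 1.0 / (1.0 + np.exp(-(np.asarray(snr) - 7.0)))
            >>> observed = sim_catalog.observe(efficiency=eff)  # doctest: +SKIP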
"""
df = pd.DataFrame()
for c in ObservedCatalog._required_columns:
df[c] = np.nan * np.ones(len(self))
df.host = self.host
df.period = self.period
d_pri = self.d_pri * self.dilution
d_sec = self.d_sec * self.dilution
if efficiency is None:
pri_detected = d_pri > 1e-6
sec_detected = d_sec > 1e-6
else:
u_pri = np.random.random(len(self))
u_sec = np.random.random(len(self))
pri_detected = u_pri < efficiency(self.snr_pri)
sec_detected = u_sec < efficiency(self.snr_sec)
has_pri = (self.n_pri > 0) & pri_detected
has_sec = (self.n_sec > 0) & sec_detected
pri = has_pri & (d_pri > d_sec)
sec_is_pri = (~has_pri & has_sec) | (has_pri & has_sec & (d_pri < d_sec))
sec = (has_pri & has_sec) & (d_sec <= d_pri)
pri_is_sec = (has_pri & has_sec) & (d_pri < d_sec)
pri_masks = [pri, sec_is_pri]
pri_sources = ['pri', 'sec']
sec_masks = [sec, pri_is_sec]
sec_sources = ['sec', 'pri']
tag = '_regr' if self._trap_regr else ''
# primary properties
for col, source_base in zip(['logd_pri', 'dur_pri', 'slope_pri', 'n_pri', 'snr_pri'],
['trap_depth', 'trap_dur', 'trap_slope', 'n', 'snr']):
for m, s in zip(pri_masks, pri_sources):
if source_base in ('n', 'snr'):
source_col = source_base + '_' + s
else:
source_col = source_base + '_' + s + tag
df.loc[m, col] = self.loc[m, source_col]
if col=='logd_pri':
df.loc[m, col] = np.log10(df.loc[m, col] * self.loc[m, 'dilution'])
# secondary properties
for col, source_base in zip(['logd_sec', 'dur_sec', 'slope_sec', 'n_sec', 'snr_sec'],
['trap_depth', 'trap_dur', 'trap_slope', 'n', 'snr']):
for m, s in zip(sec_masks, sec_sources):
if source_base in ('n', 'snr'):
source_col = source_base + '_' + s
else:
source_col = source_base + '_' + s + tag
df.loc[m, col] = self.loc[m, source_col]
if col=='logd_sec':
df.loc[m, col] = np.log10(df.loc[m, col] * self.loc[m, 'dilution'])
df.loc[sec | pri_is_sec, 'phase_sec'] = self.loc[sec | pri_is_sec, 'phase_sec']
df.sec_is_pri = sec_is_pri
self._observed = ObservedCatalog(df).dropna(subset=['n_pri'])
return self._observed
@property
def observed(self):
if self._observed is None:
self.observe()
return self._observed
def trap_corner(self, sec=False, **kwargs):
if sec:
cols = ['dur_sec', 'logd_sec', 'slope_sec']
else:
cols = ['dur_pri', 'logd_pri', 'slope_pri']
return corner(self.observed[cols], labels=['duration', 'log(d)', 'T/tau'],
**kwargs)
| mit |
hydrogo/hydropy | hydropy/storm.py | 1 | 10270 | # -*- coding: utf-8 -*-
"""
Hydropy package
@author: Stijn Van Hoey
"""
import datetime
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset, Day, Week, Hour, Minute
import matplotlib as mpl
mpl.rcParams['mathtext.default'] = 'regular'
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import LinearLocator
def selectstorms(flowserie, rainserie, number_of_storms=3,
min_period_in_between=7, search_period=7,
drywindow=96):
""" (pd.DataFrame, pd.DataFrame) -> List
Easy storm selection process, based on the maximum flows measured
in the given timeserie of flow measurements.
To define the startdate of the storm, 24h no rain before the Qmax is
searched for. The end date is found by checking the
flow at the startdate (Qbase) and searching the moment after Qmax with
the same flow within the first 2 weeks.
If none is found, relaxation (1.1*Qbase; 1.2*Qbase,...)
until a moment is found.
Parameters
----------
flowserie : pd.Series
Pandas Series with the date in the index
rainserie : pd.Series
Pandas Series with the date in the index
number_of_storms : int
Number of storms you want to select
min_period_in_between : int (days)
        Minimum number of days in between two selected storms
search_period : int (days)
Period to look for the start of the storm, when rain started
drywindow : int
Number of timesteps to check for no-rain
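    Examples
    --------
    A minimal sketch with a synthetic 15-minute series and a single, hand
    placed rainfall event (all names and values are illustrative only)::
        >>> import numpy as np
        >>> import pandas as pd
        >>> idx = pd.date_range('2012-01-01', periods=4*24*60, freq='15min')
        >>> flow = pd.Series(1.0 + np.random.random(len(idx)), index=idx)
        >>> rain = pd.Series(0.0, index=idx)
        >>> rain['2012-01-20'] = 2.0  # rainfall event
        >>> flow['2012-01-20':'2012-01-22'] += 5.0  # flow response
        >>> storms = selectstorms(flow, rain, number_of_storms=1,
        ...                       drywindow=96)  # doctest: +SKIP
        >>> storms[0]['startdate'], storms[0]['enddate']  # doctest: +SKIP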
"""
if not isinstance(flowserie, pd.Series):
raise Exception('flowserie is a single data Series')
if not isinstance(rainserie, pd.Series):
raise Exception('rainserie is a single data Series')
#fill na values with very low (negative) value
temp = flowserie.fillna(value=-777.).copy()
#sort the whole array
try:
temp = temp.sort(temp.columns.tolist(), ascending=False)
except:
temp.sort(ascending=False)
#find in the index three periods which are at least given number
# of days from each other
#after three concurrences, save these dates
stormmax = [temp.index[0]] #first element is a selected storm
i = 1
while len(stormmax) < number_of_storms:
#check for each period
alldif = True
for stormdate in stormmax:
if abs(temp.index[i] - stormdate) \
< datetime.timedelta(days=min_period_in_between):
alldif = False
#if new stormperiod, select
if alldif:
stormmax.append(temp.index[i])
i+=1
selstorms = []
for storm in stormmax:
##FIND DRY DAY WEEK BEFORE
#select period before storm (1 week)
presearchperiod = datetime.timedelta(days=search_period)
temp1 = rainserie[storm - presearchperiod:storm]
temp1 = pd.rolling_sum(temp1, window=drywindow, center=False)
#zero value means the preceding 24hours no rain: so, closest zeros
#to the date itself -24h are selected
if rainserie.ndim == 2:
temp1 = temp1.min(axis=1)
tempdates = temp1[temp1 < 0.001].index.tolist()
if len(tempdates) == 0:
raise Exception('Decrease drywindow period containing no rain.')
date_arg = np.argmin([abs(times - storm) for times in tempdates])
startstormdate = tempdates[date_arg] - Day()
#Get the flow value of the storm and when it is found again + 1 Day
temp2a = flowserie[startstormdate:startstormdate + Week()*2]
#only if multiple columns
if flowserie.ndim == 2:
temp2 = temp2a.max(axis=1)
else:
temp2 = temp2a
flowbase = temp2.ix[startstormdate]
lowerafterstorm = temp2[temp2 < flowbase][storm + Day():]
if lowerafterstorm.size == 0:
print 'Lower initial flow not found again...test with mean...'
if flowserie.ndim == 2:
temp2 = temp2a.mean(axis=1)
else:
temp2 = temp2a
flowbase = temp2.ix[startstormdate]
lowerafterstorm = temp2[temp2 < flowbase][storm + Day():]
cnt = 1
while lowerafterstorm.size == 0:
print '... still not working; relaxing conditions...', \
cnt*10, '% of minimal after storm incorporated'
flowbase = flowbase + 0.1*flowbase
lowerafterstorm = temp2[temp2 < flowbase][storm + Day():]
cnt += 1
endstormdate = lowerafterstorm.index[0]
#add to selected storms
selstorms.append({'startdate':startstormdate,'enddate':endstormdate})
return selstorms
def _control_dayhour(Timestamp):
"""pd.TimeStamp -> int
Help function for editing the date representation of the plots
"""
if Timestamp.hour == 0 and Timestamp.minute == 0:
return 0
else:
return 1
def _getsize(nrows):
"""int -> int
propose height of the figure based on number of rows
"""
size_dict = {1:6,2:6,3:8,4:8,5:10,6:12}
return size_dict[nrows]
def _add_labels_above(ax0, fig, flowdim, raindim):
""" matplotlib.axes -> None
"""
bbox = ax0.get_position()
rainlabel = ax0.text(bbox.x0 + bbox.width,
bbox.y0 + bbox.height, r"Rain ($mm$)",
transform=fig.transFigure,
verticalalignment="bottom",
horizontalalignment="right")
flowlabel = ax0.text(bbox.x0, bbox.y0 + bbox.height,
r"Flow ($m^3s^{-1}$)",
transform=fig.transFigure,
verticalalignment="bottom",
horizontalalignment="left")
if flowdim == 1:
flowlabel.set_color('#08519c')
if raindim == 1:
rainlabel.set_color('#6baed6')
def _make_comparable(axes):
"""axes -> None
updates the y-bound of the subplot, giving them all the bounds of the
largest
only used for the rain-flow combined subplots configuration within a
gridspec environment
"""
#check the configuration
if axes[0].get_subplotspec().get_gridspec().get_height_ratios():
nplots = int(len(axes)/2.)
ymaxes = [max(axs.get_yticks()) for axs in axes]
rainmax = max(ymaxes[::2])
flowmax = max(ymaxes[1::2])
newmaxes = [rainmax, flowmax]*nplots
for axs, nmax in zip(axes, newmaxes):
axs = axs.set_ybound(upper=nmax)
else:
ymaxes = [max(axs.get_yticks()) for axs in axes[1:]]
flowmax = max(ymaxes)
for axs in axes[1:]:
axs = axs.set_ybound(upper=flowmax)
def plotstorms(flowserie, rainserie, selected_storm,
tsfreq = None, tsfrequnit = None,
make_comparable = False,
period_title = False):
"""
Plot Flow-Rain plots for every storm period selected,
optimal sizes and configuration done for 1 till 5 subplots (storms)
"""
if len(selected_storm) > 6:
raise Exception('Split plotting up in multiple figures')
fig = plt.figure(facecolor = 'white', figsize=(12,
_getsize(len(selected_storm))))
gs0 = gridspec.GridSpec(len(selected_storm), 1)
gs0.update(hspace=0.35)
for j, storm in enumerate(selected_storm):
gs00 = gridspec.GridSpecFromSubplotSpec(2, 1,
subplot_spec=gs0[j],
hspace=0.0,
height_ratios = [2, 4])
#RAIN PLOT
ax0 = fig.add_subplot(gs00[0])
ax0.plot(rainserie[storm['startdate'] :
storm['enddate']].index.to_pydatetime(),
rainserie[storm['startdate'] :
storm['enddate']].values,
linestyle='steps')
#FLOW PLOT
stormflow = flowserie[storm['startdate'] : storm['enddate']]
ax1 = fig.add_subplot(gs00[1], sharex = ax0)
ax1.plot(stormflow.index.to_pydatetime(), stormflow.values,
label = r" Measured Flow ($m^3s^{-1}$)")
#if single plots of flow/rain -> set specific color
if flowserie.ndim == 1:
ax1.lines[0].set_color('#08519c')
if rainserie.ndim == 1:
ax0.lines[0].set_color('#6baed6')
#ADAPT ticks for storm-conditions (less than a month timeseries)
ax0.yaxis.set_major_locator(LinearLocator(3))
ax1.yaxis.set_major_locator(LinearLocator(3))
ax1.xaxis.set_minor_locator(mpl.dates.DayLocator())
ax1.xaxis.set_minor_formatter(mpl.dates.DateFormatter('%d'))
ax1.xaxis.set_major_locator(mpl.dates.MonthLocator(bymonthday =
[1, storm['startdate'].day + \
_control_dayhour(storm['startdate'])]))
ax1.xaxis.set_major_formatter(
mpl.dates.DateFormatter('\n %b %Y'))
#Add the labels of the different flows
if j == 0:
_add_labels_above(ax0, fig, flowserie.ndim, rainserie.ndim)
#Print the start and end period as title above subplots
if period_title:
ax0.set_title(storm['startdate'].strftime("%d/%m/%y") + " - " +
storm['enddate'].strftime("%d/%m/%y"),
fontweight='bold', fontsize = 12)
#Looks of the rainplot
ax0.set_xlabel('')
ax0.invert_yaxis()
ax0.yaxis.tick_right()
ax0.spines['bottom'].set_visible(False)
ax0.spines['top'].set_visible(False)
plt.setp(ax0.get_xminorticklabels(), visible=False)
plt.setp(ax0.get_xmajorticklabels(), visible=False)
plt.setp(ax0.get_xminorticklabels(), visible=False)
#looks of the flowplot
ax1.spines['top'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.set_xlabel('')
plt.draw()
all_axes = fig.get_axes()
#Give all the subplots the same y-bounds
if make_comparable:
_make_comparable(all_axes)
return fig, all_axes
| bsd-2-clause |
MartinSavc/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the Logistic Regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
bthirion/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
saiwing-yeung/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
Nik0l/UTemPro | UsersActivity.py | 1 | 9237 | __author__ = 'nb254'
from collections import Counter
import csv, time
import pandas as pd
POSTID_INDEX = 0
USERID_INDEX = 1
TIME_POSTED_INDEX = 2
POST_TYPE_INDEX = 3
def SaveStatToCSV(file_name, data, header):
with open(file_name, 'wb') as myfile:
wr = csv.writer(myfile)
wr.writerow(header)
wr.writerows(data)
return
def UniqueUsers(data):
unique_users = list(Counter(data['UserId']))
#print unique_users
return unique_users
def makePosts(data):
posts = []
for time_stamp in data['TimePosted']:
#print time_stamp
sq = time_stamp.replace("T", " ")
sq = sq[0:19]
time_posted = time.strptime(sq, "%Y-%m-%d %H:%M:%S")
year_posted = time.strftime("%Y", time_posted)
month_posted = time.strftime("%m", time_posted)
day_posted = time.strftime("%d", time_posted)
hour_posted = time.strftime("%H", time_posted)
min_posted = time.strftime("%M", time_posted)
posts.append([year_posted, month_posted, day_posted, hour_posted, min_posted])
df = pd.DataFrame(posts, columns=['year', 'month', 'day', 'hour', 'min'])
result = pd.concat([data, df], axis=1)
return result
def UsersActivity(users, posts):
activities = []
daytime_q = [0]*24
daytime_a = [0]*24
monthtime_q = [0]*12
monthtime_a = [0]*12
index = 0
num = 0
print posts.head()
#print "users: ", len(users)
for user in users:
for index in xrange(0, len(posts)):
hour_posted = posts['hour'][index]
month_posted = posts['month'][index]
if posts['UserId'][index] == user:# if the post belongs to a user
if posts['PostType'][index] == 1:# question
daytime_q[int(hour_posted)] = daytime_q[int(hour_posted)] + 1
monthtime_q[int(month_posted)-1] = monthtime_q[int(month_posted)-1] + 1
elif posts['PostType'][index] == 2:# answer
daytime_a[int(hour_posted)] = daytime_a[int(hour_posted)] + 1
monthtime_a[int(month_posted)-1] = monthtime_a[int(month_posted)-1] + 1
activities.append([user, monthtime_q, daytime_q, monthtime_a, daytime_a])
print ('user:', user)
num = num + 1
print ('number: ', num)
#print daytime
daytime_q = [0]*24
daytime_a = [0]*24
monthtime_q = [0]*12
monthtime_a = [0]*12
return activities
def usersActivityFast(posts):
posts = posts.sort(['UserId'], ascending=True)
posts = posts[posts['UserId'] != -1]
posts = posts[['UserId','PostType', 'year', 'month', 'day', 'hour', 'min']]
posts = posts.reset_index()
activities = []
act_time = dict(
daytime_q = [0]*24,
daytime_a = [0]*24,
monthtime_q = [0]*12,
monthtime_a = [0]*12,
)
num = 0
#print posts.head()
for index in xrange(0, len(posts)-1):
updateActivity(posts, index, act_time)
if posts['UserId'][index] != posts['UserId'][index+1]:
activities.append([posts['UserId'][index], act_time['daytime_q'],
act_time['daytime_a'], act_time['monthtime_q'], act_time['monthtime_a']])
resetActivity(act_time)
#print 'new user:', posts['UserId'][index+1]
        else:
            # same user: this post was already counted by the call above the
            # if-test, so do not update the activity counters a second time
            #print ('same user:', posts['UserId'][index+1])
            pass
num = num + 1
#print 'number: ', num
df_act = pd.DataFrame(activities, columns =['UserId','Q_HOUR', 'A_HOUR', 'Q_MONTH', 'A_MONTH'])
return df_act
def updateActivity(posts, index, act_time):
hour_posted = posts['hour'][index]
month_posted = posts['month'][index]
if posts['PostType'][index] == 1:# question
act_time['daytime_q'][int(hour_posted)] = act_time['daytime_q'][int(hour_posted)] + 1
act_time['monthtime_q'][int(month_posted)-1] = act_time['monthtime_q'][int(month_posted)-1] + 1
elif posts['PostType'][index] == 2:# answer
act_time['daytime_a'][int(hour_posted)] = act_time['daytime_a'][int(hour_posted)] + 1
act_time['monthtime_a'][int(month_posted)-1] = act_time['monthtime_a'][int(month_posted)-1] + 1
def resetActivity(act_time):
act_time['daytime_q'] = [0]*24
act_time['daytime_a'] = [0]*24
act_time['monthtime_q'] = [0]*12
act_time['monthtime_a'] = [0]*12
def HourActivity(daytime, hour_posted):
#hour_posted = time.strftime("%H", time_posted)
## 7am - 12pm, 12pm - 6 pm, 6pm - 12am, 0am - 7 am
#daytime = [0, 0, 0, 0]
if int(hour_posted) < 7:
daytime[0] = daytime[0] + 1
elif int(hour_posted) < 12:
daytime[1] = daytime[1] + 1
elif int(hour_posted) < 18:
daytime[2] = daytime[2] + 1
elif int(hour_posted) < 24:
daytime[3] = daytime[3] + 1
def MonthActivity(monthtime, month_posted):
#hour_posted = time.strftime("%H", time_posted)
## 7am - 12pm, 12pm - 6 pm, 6pm - 12am, 0am - 7 am
monthtime = [0]*12
monthtime[month_posted-1] = monthtime[month_posted-1] + 1
def GetUserActivity(filename):
f = open(filename)
users_activity = csv.reader(f, delimiter=',', quotechar='|')
return users_activity
def userActivityTransform(df):
dfq_hour = pd.DataFrame(list(df['Q_HOUR']), columns=['qh1','qh2','qh3','qh4','qh5','qh6','qh7','qh8','qh9','qh10','qh11','qh12',
'qh13','qh14','qh15','qh16','qh17','qh18','qh19','qh20','qh21','qh22','qh23','qh24'])
dfa_hour = pd.DataFrame(list(df['A_HOUR']), columns=['ah1','ah2','ah3','ah4','ah5','ah6','ah7','ah8','ah9','ah10','ah11','ah12',
'ah13','ah14','ah15','ah16','ah17','ah18','ah19','ah20','ah21','ah22','ah23','ah24'])
dfq_month = pd.DataFrame(list(df['Q_MONTH']), columns=['qm1', 'qm2', 'qm3', 'qm4', 'qm5', 'qm6',
'qm7', 'qm8', 'qm9', 'qm10', 'qm11', 'qm12'])
dfa_month = pd.DataFrame(list(df['A_MONTH']), columns=['am1', 'am2', 'am3', 'am4', 'am5', 'am6',
'am7', 'am8', 'am9', 'am10', 'am11', 'am12'])
result = pd.concat([df['UserId'], dfq_hour, dfa_hour, dfq_month, dfa_month], axis=1)
return result
def transformString(items):
items = items.replace(",", "")
items = items.replace("[", "")
items = items.replace("]", "")
items = items.split()
return items
def extractDayWeekActivity(posts, year, month, day):
#print year
#print len(posts)
posts = posts.loc[posts['year'].astype(int) == year]
#print posts
posts = posts.loc[posts['month'].astype(int) == month]
#print posts
#print 'this month', len(posts)
#posts_week = posts_week[['UserId','PostType', 'year', 'month', 'day', 'hour', 'min']]
posts_week = posts[posts['day'].astype(int) > day]
posts_week = posts_week.sort(['UserId'], ascending=False)
posts_week = posts_week.reset_index()
#print posts_week
#TODO if not saved, then indices must be put in order
posts_day = posts_week[posts_week['day'] > 12]
posts_day = posts_day[['UserId','PostType', 'year', 'month', 'day', 'hour', 'min']]
posts_day = posts_day.sort(['UserId'], ascending=False)
posts_day = posts_day.reset_index()
#print 'this day', len(posts_day)
#print posts_day
return posts_week, posts_day
def extractTimeIntervalFeatures(posts_week, posts_day):
activities_day = activityDay(posts_day)
df_day = pd.DataFrame(activities_day, columns=['UserId', 'P_NUM_LAST_DAY'])
#print df_day
#activities last week
activities = activityWeek(posts_week)
df_week = pd.DataFrame(activities, columns=['UserId', 'Q_LAST_WEEK', 'A_LAST_WEEK'])
#print df_week
df_result = pd.merge(df_week, df_day, how='left', on='UserId')
df_result = df_result.fillna(0)
df_result['P_NUM_LAST_DAY'] = df_result['P_NUM_LAST_DAY'].astype(int)
return df_result
def activityDay(posts):
activities_day = []
comments = 0
#activities last day
for index in xrange(0, len(posts)-1):
comments = comments + 1
if posts['UserId'][index] != posts['UserId'][index+1]:
activities_day.append([posts['UserId'][index], comments])
comments = 0
return activities_day
def activityWeek(posts):
activities = []
act = dict(
questions = 0,
answers = 0,
)
for index in xrange(0, len(posts)-1):
updateActivities(posts, act, index)
if posts['UserId'][index] != posts['UserId'][index+1]:
activities.append([posts['UserId'][index], act['questions'], act['answers']])
act['questions'] = 0
act['answers']= 0
return activities
def updateActivities(posts, act, index):
if posts['PostType'][index] == 1: # question
act['questions'] = act['questions'] + 1
elif posts['PostType'][index] == 2: # answer
act['answers'] = act['answers'] + 1
def theLatestPostTime(posts):
#TODO hardcoded for now - update
year = 2014
month = 7
day = 7
return year, month, day
def oneDayFromNow(year, day, month):
#TODO hardcoded for now - update
year = 2014
month = 12
day = 14
return year, month, day
| mit |
mdhurst1/RoBoCoP_CRN | plotting_functions/plot_step_results.py | 1 | 3207 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 15:10:34 2016
Script to plot the results of RockyCoastCRN experiments to explore the influence
of platform erosion processes on the concentrations of CRNs built up in the
platform surface
Martin Hurst,
Feb 9th 2016
@author: mhurst
"""
#import modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import rc
# Customise figure style #
rc('font',size=8)
rc('ytick.major',pad=5)
rc('xtick.major',pad=5)
padding = 5
#setup figure
plt.figure(1,figsize=(6,8),facecolor="white")
#Filename lists
FileNames = ["Steps100cm","Steps50cm","Steps20cm","Steps10cm","NoSteps"]
StepSizes = [100,50,20,10,0]
#setup subplot for plotting concentrations
ax1 = plt.axes([0.1,0.1,0.8,0.32])
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
plt.xlabel("Distance (m)")
plt.ylabel("$^{10}$Be Concentration (atoms g$^{-1}$ yr$^{-1}$)")
for i in range (0,len(FileNames)):
#setup subplot
ax = plt.axes([0.1,0.88-float(i)*0.11,0.8,0.09])
ax.set_xticklabels([])
ax.set_xticks([])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.set_yticks([-10,-5,0,5])
#setup color
Color = float(i)/float(len(FileNames))
FileName = "../BlockRemoval_" + FileNames[i] + ".pdat"
f = open(FileName,'r')
Lines = f.readlines()
NoLines = len(Lines)
#Get header info and setup X coord
Header = Lines[0].strip().split(" ")
NXNodes = float(Header[0])
PlatformWidth = float(Header[1])
X = np.linspace(0,PlatformWidth+1,NXNodes)
for j in range(10,NoLines,8):
#Get data
Line = Lines[j].strip().split(" ")
Time = float(Line[0])
Z = np.array(Line[1:],dtype="float64")
#mask for NDVs
mask = Z != -9999
Zplot = Z[mask]
Xplot = X[mask]
plt.plot([Xplot[0]-20,Xplot[0],Xplot[0]],[5,5,0],'-',color=cm.Paired(Color))
if (i == 0):
plt.text(Xplot[0]-20,Zplot[0]+8, str(Time/1000.) + " ka")
plt.plot(Xplot,Zplot,'-',color=cm.Paired(Color))
#Offset -= 15
if (i ==0):
ax.text(950,5,'(a)')
if (i == 2):
plt.ylabel('Elevation (m)')
plt.xlim(-50,1000)
#Plot concentrations
#Get data
Line = Lines[NoLines-1].strip().split(" ")
Time = float(Line[0])
N = np.array(Line[1:],dtype="float64")
#plot concentration data
ax1.plot(X,N,'-',color=cm.Paired(Color),linewidth=2,label=str(StepSizes[i]/100.) + " m")
#Display legend
plt.rcParams.update({'legend.labelspacing':0.1})
plt.rcParams.update({'legend.columnspacing':1.0})
plt.rcParams.update({'legend.numpoints':1})
plt.rcParams.update({'legend.frameon':False})
plt.rcParams.update({'legend.handlelength':0.5})
plt.rcParams.update({'legend.fontsize':8})
ax1.legend(loc=1,ncol=1,title="Bed Spacing")
ax1.text(0,2800,'(b)')
ax1.set_xlim(-50,1000)
ax.set_xticklabels([])
plt.savefig("./BlockRemoval.png")
plt.show() | gpl-3.0 |
poo12138/gem5-stable | util/stats/barchart.py | 90 | 12472 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Lisa Hsu
import matplotlib, pylab
from matplotlib.font_manager import FontProperties
from matplotlib.numerix import array, arange, reshape, shape, transpose, zeros
from matplotlib.numerix import Float
from matplotlib.ticker import NullLocator
matplotlib.interactive(False)
from chart import ChartOptions
class BarChart(ChartOptions):
def __init__(self, default=None, **kwargs):
super(BarChart, self).__init__(default, **kwargs)
self.inputdata = None
self.chartdata = None
self.inputerr = None
self.charterr = None
def gen_colors(self, count):
cmap = matplotlib.cm.get_cmap(self.colormap)
if count == 1:
return cmap([ 0.5 ])
if count < 5:
return cmap(arange(5) / float(4))[:count]
return cmap(arange(count) / float(count - 1))
    # The input data format is chosen to be intuitive for the caller, so it
    # does not match the data format that the graph function takes. The conversion
# from input data format to chart data format depends on the
# dimensionality of the input data. Check here for the
# dimensionality and correctness of the input data
def set_data(self, data):
if data is None:
self.inputdata = None
self.chartdata = None
return
data = array(data)
dim = len(shape(data))
if dim not in (1, 2, 3):
raise AttributeError, "Input data must be a 1, 2, or 3d matrix"
self.inputdata = data
# If the input data is a 1d matrix, then it describes a
# standard bar chart.
if dim == 1:
self.chartdata = array([[data]])
# If the input data is a 2d matrix, then it describes a bar
# chart with groups. The matrix being an array of groups of
# bars.
if dim == 2:
self.chartdata = transpose([data], axes=(2,0,1))
# If the input data is a 3d matrix, then it describes an array
# of groups of bars with each bar being an array of stacked
# values.
if dim == 3:
self.chartdata = transpose(data, axes=(1,2,0))
def get_data(self):
return self.inputdata
data = property(get_data, set_data)
def set_err(self, err):
if err is None:
self.inputerr = None
self.charterr = None
return
err = array(err)
dim = len(shape(err))
if dim not in (1, 2, 3):
raise AttributeError, "Input err must be a 1, 2, or 3d matrix"
self.inputerr = err
if dim == 1:
self.charterr = array([[err]])
if dim == 2:
self.charterr = transpose([err], axes=(2,0,1))
if dim == 3:
self.charterr = transpose(err, axes=(1,2,0))
def get_err(self):
return self.inputerr
err = property(get_err, set_err)
# Graph the chart data.
# Input is a 3d matrix that describes a plot that has multiple
# groups, multiple bars in each group, and multiple values stacked
# in each bar. The underlying bar() function expects a sequence of
# bars in the same stack location and same group location, so the
# organization of the matrix is that the inner most sequence
# represents one of these bar groups, then those are grouped
# together to make one full stack of bars in each group, and then
# the outer most layer describes the groups. Here is an example
# data set and how it gets plotted as a result.
#
# e.g. data = [[[10,11,12], [13,14,15], [16,17,18], [19,20,21]],
# [[22,23,24], [25,26,27], [28,29,30], [31,32,33]]]
#
# will plot like this:
#
# 19 31 20 32 21 33
# 16 28 17 29 18 30
# 13 25 14 26 15 27
# 10 22 11 23 12 24
#
# Because this arrangement is rather counterintuitive, the rearrange
# function takes various matrices and arranges them to fit this
# profile.
#
# This code also handles the case where one of the dimensions in the
# matrix has size one.
#
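# Put differently (illustrative note, not from the original source):
# chartdata[i][j][k] is the value of the i-th bar within each group, the
# j-th stacked segment of that bar, and the k-th group along the x axis.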
def graph(self):
if self.chartdata is None:
raise AttributeError, "Data not set for bar chart!"
dim = len(shape(self.inputdata))
cshape = shape(self.chartdata)
if self.charterr is not None and shape(self.charterr) != cshape:
raise AttributeError('Dimensions of error and data do not match')
if dim == 1:
colors = self.gen_colors(cshape[2])
colors = [ [ colors ] * cshape[1] ] * cshape[0]
if dim == 2:
colors = self.gen_colors(cshape[0])
colors = [ [ [ c ] * cshape[2] ] * cshape[1] for c in colors ]
if dim == 3:
colors = self.gen_colors(cshape[1])
colors = [ [ [ c ] * cshape[2] for c in colors ] ] * cshape[0]
colors = array(colors)
self.figure = pylab.figure(figsize=self.chart_size)
outer_axes = None
inner_axes = None
if self.xsubticks is not None:
color = self.figure.get_facecolor()
self.metaaxes = self.figure.add_axes(self.figure_size,
axisbg=color, frameon=False)
for tick in self.metaaxes.xaxis.majorTicks:
tick.tick1On = False
tick.tick2On = False
self.metaaxes.set_yticklabels([])
self.metaaxes.set_yticks([])
size = [0] * 4
size[0] = self.figure_size[0]
size[1] = self.figure_size[1] + .12
size[2] = self.figure_size[2]
size[3] = self.figure_size[3] - .12
self.axes = self.figure.add_axes(size)
outer_axes = self.metaaxes
inner_axes = self.axes
else:
self.axes = self.figure.add_axes(self.figure_size)
outer_axes = self.axes
inner_axes = self.axes
bars_in_group = len(self.chartdata)
width = 1.0 / ( bars_in_group + 1)
center = width / 2
bars = []
for i,stackdata in enumerate(self.chartdata):
bottom = array([0.0] * len(stackdata[0]), Float)
stack = []
for j,bardata in enumerate(stackdata):
bardata = array(bardata)
ind = arange(len(bardata)) + i * width + center
yerr = None
if self.charterr is not None:
yerr = self.charterr[i][j]
bar = self.axes.bar(ind, bardata, width, bottom=bottom,
color=colors[i][j], yerr=yerr)
if self.xsubticks is not None:
self.metaaxes.bar(ind, [0] * len(bardata), width)
stack.append(bar)
bottom += bardata
bars.append(stack)
if self.xlabel is not None:
outer_axes.set_xlabel(self.xlabel)
if self.ylabel is not None:
inner_axes.set_ylabel(self.ylabel)
if self.yticks is not None:
ymin, ymax = self.axes.get_ylim()
nticks = float(len(self.yticks))
ticks = arange(nticks) / (nticks - 1) * (ymax - ymin) + ymin
inner_axes.set_yticks(ticks)
inner_axes.set_yticklabels(self.yticks)
elif self.ylim is not None:
inner_axes.set_ylim(self.ylim)
if self.xticks is not None:
outer_axes.set_xticks(arange(cshape[2]) + .5)
outer_axes.set_xticklabels(self.xticks)
if self.xsubticks is not None:
numticks = (cshape[0] + 1) * cshape[2]
inner_axes.set_xticks(arange(numticks) * width + 2 * center)
xsubticks = list(self.xsubticks) + [ '' ]
inner_axes.set_xticklabels(xsubticks * cshape[2], fontsize=7,
rotation=30)
if self.legend is not None:
if dim == 1:
lbars = bars[0][0]
if dim == 2:
lbars = [ bars[i][0][0] for i in xrange(len(bars))]
if dim == 3:
number = len(bars[0])
lbars = [ bars[0][number - j - 1][0] for j in xrange(number)]
if self.fig_legend:
self.figure.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
else:
self.axes.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
if self.title is not None:
self.axes.set_title(self.title)
def savefig(self, name):
self.figure.savefig(name)
def savecsv(self, name):
f = open(name, 'w')
data = array(self.inputdata)
dim = len(data.shape)
if dim == 1:
#if self.xlabel:
# f.write(', '.join(list(self.xlabel)) + '\n')
f.write(', '.join([ '%f' % val for val in data]) + '\n')
if dim == 2:
#if self.xlabel:
# f.write(', '.join([''] + list(self.xlabel)) + '\n')
for i,row in enumerate(data):
ylabel = []
#if self.ylabel:
# ylabel = [ self.ylabel[i] ]
f.write(', '.join(ylabel + [ '%f' % v for v in row]) + '\n')
if dim == 3:
f.write("don't do 3D csv files\n")
pass
f.close()
if __name__ == '__main__':
from random import randrange
import random, sys
dim = 3
number = 5
args = sys.argv[1:]
if len(args) > 3:
sys.exit("invalid number of arguments")
elif len(args) > 0:
myshape = [ int(x) for x in args ]
else:
myshape = [ 3, 4, 8 ]
# generate a data matrix of the given shape
size = reduce(lambda x,y: x*y, myshape)
#data = [ random.randrange(size - i) + 10 for i in xrange(size) ]
data = [ float(i)/100.0 for i in xrange(size) ]
data = reshape(data, myshape)
# setup some test bar charts
if True:
chart1 = BarChart()
chart1.data = data
chart1.xlabel = 'Benchmark'
chart1.ylabel = 'Bandwidth (GBps)'
chart1.legend = [ 'x%d' % x for x in xrange(myshape[-1]) ]
chart1.xticks = [ 'xtick%d' % x for x in xrange(myshape[0]) ]
chart1.title = 'this is the title'
if len(myshape) > 2:
chart1.xsubticks = [ '%d' % x for x in xrange(myshape[1]) ]
chart1.graph()
chart1.savefig('/tmp/test1.png')
chart1.savefig('/tmp/test1.ps')
chart1.savefig('/tmp/test1.eps')
chart1.savecsv('/tmp/test1.csv')
if False:
chart2 = BarChart()
chart2.data = data
chart2.colormap = 'gray'
chart2.graph()
chart2.savefig('/tmp/test2.png')
chart2.savefig('/tmp/test2.ps')
# pylab.show()
| bsd-3-clause |
KFubuki/dotfiles | python/colormap-master/joes-blu_grn_pnk2.py | 1 | 13205 |
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in viscm
parameters = {'xp': [-6.1859603226509137, -14.579245694353546, -16.377806845432701, -75.430564639197684, 40.576629605406652, 14.197732722912633],
'yp': [-38.399280575539549, 5.6654676258992822, 11.061151079136692, 51.229016786570725, 35.641486810551555, -4.8261390887290077],
'min_JK': 27.8125,
'max_JK': 87.2916666667}
cm_data = [[ 0.08488181, 0.18308161, 0.67262216],
[ 0.08312345, 0.19202711, 0.66090297],
[ 0.08217997, 0.20014429, 0.64976469],
[ 0.08198433, 0.2075936 , 0.63911277],
[ 0.08247594, 0.21447696, 0.62891889],
[ 0.08359003, 0.2208603 , 0.61920504],
[ 0.08524777, 0.2268386 , 0.60985788],
[ 0.08738166, 0.23244191, 0.60093206],
[ 0.08992038, 0.2377215 , 0.59238917],
[ 0.09280155, 0.24272587, 0.58416923],
[ 0.09596381, 0.24747257, 0.57631143],
[ 0.0993575 , 0.251998 , 0.56876406],
[ 0.10293853, 0.25632645, 0.56150762],
[ 0.10666336, 0.2604716 , 0.55456023],
[ 0.1105036 , 0.26445857, 0.54787429],
[ 0.11443019, 0.26830128, 0.54144512],
[ 0.11841518, 0.27200976, 0.53528253],
[ 0.12244369, 0.27560103, 0.5293494 ],
[ 0.12649882, 0.27908535, 0.52363821],
[ 0.13056226, 0.28246931, 0.51815934],
[ 0.13462547, 0.28576389, 0.51289008],
[ 0.13868224, 0.28897881, 0.50780839],
[ 0.14271871, 0.2921176 , 0.50293114],
[ 0.14672864, 0.29518709, 0.49824894],
[ 0.15071345, 0.29819656, 0.4937278 ],
[ 0.1546626 , 0.30114819, 0.48938498],
[ 0.15857184, 0.30404672, 0.48521597],
[ 0.16243974, 0.30689736, 0.48120845],
[ 0.16626692, 0.3097055 , 0.47734461],
[ 0.17004651, 0.31247292, 0.47363702],
[ 0.17377693, 0.31520325, 0.47007969],
[ 0.1774579 , 0.31790023, 0.46666296],
[ 0.18109034, 0.32056767, 0.46337356],
[ 0.18466948, 0.32320706, 0.46022069],
[ 0.18819439, 0.3258211 , 0.4571996 ],
[ 0.19166421, 0.32841234, 0.45430585],
[ 0.19508052, 0.33098399, 0.4515254 ],
[ 0.19844012, 0.33353749, 0.44886326],
[ 0.20174153, 0.33607477, 0.44631801],
[ 0.20498379, 0.3385979 , 0.44388601],
[ 0.20816589, 0.3411088 , 0.44156374],
[ 0.21128813, 0.34360991, 0.43934139],
[ 0.21434794, 0.34610257, 0.43722134],
[ 0.21734359, 0.34858841, 0.43520227],
[ 0.22027364, 0.35106916, 0.433281 ],
[ 0.22313653, 0.35354653, 0.43145439],
[ 0.22593054, 0.3560222 , 0.42971931],
[ 0.22865443, 0.35849817, 0.42806853],
[ 0.23130553, 0.36097577, 0.42650261],
[ 0.23388157, 0.36345656, 0.42501923],
[ 0.23638033, 0.36594216, 0.42361498],
[ 0.23879948, 0.36843418, 0.42228628],
[ 0.24113658, 0.37093422, 0.42102943],
[ 0.24338915, 0.37344386, 0.41984053],
[ 0.24555463, 0.37596471, 0.41871526],
[ 0.2476304 , 0.37849855, 0.41764727],
[ 0.24961385, 0.3810466 , 0.41663458],
[ 0.25150254, 0.38361032, 0.41567235],
[ 0.25329409, 0.38619113, 0.41475548],
[ 0.25498629, 0.38879034, 0.41387864],
[ 0.25657711, 0.39140923, 0.41303625],
[ 0.25806477, 0.39404893, 0.41222253],
[ 0.25944772, 0.3967105 , 0.41143149],
[ 0.26072475, 0.39939485, 0.41065702],
[ 0.26189499, 0.40210277, 0.40989287],
[ 0.26295789, 0.4048349 , 0.40913271],
[ 0.26391332, 0.40759175, 0.40837023],
[ 0.2647615 , 0.41037366, 0.40759912],
[ 0.26550293, 0.41318086, 0.40681296],
[ 0.2661385 , 0.41601343, 0.40600552],
[ 0.26666972, 0.41887118, 0.40517133],
[ 0.26709825, 0.42175387, 0.40430471],
[ 0.26742604, 0.42466112, 0.40340023],
[ 0.26765527, 0.42759246, 0.40245273],
[ 0.26778838, 0.43054729, 0.40145736],
[ 0.26782797, 0.43352494, 0.40040961],
[ 0.2677768 , 0.43652465, 0.39930525],
[ 0.26763777, 0.4395456 , 0.3981404 ],
[ 0.26741386, 0.44258692, 0.39691145],
[ 0.26710811, 0.4456477 , 0.39561511],
[ 0.26672365, 0.448727 , 0.39424836],
[ 0.26626363, 0.45182384, 0.39280844],
[ 0.26573088, 0.45493737, 0.39129233],
[ 0.26512858, 0.4580666 , 0.38969761],
[ 0.26446024, 0.46121045, 0.38802247],
[ 0.26372917, 0.46436794, 0.38626487],
[ 0.26293876, 0.46753805, 0.38442293],
[ 0.26209248, 0.47071979, 0.38249482],
[ 0.26119391, 0.47391216, 0.38047885],
[ 0.26024678, 0.47711415, 0.37837332],
[ 0.25925503, 0.48032474, 0.37617663],
[ 0.25822279, 0.48354291, 0.37388717],
[ 0.25715449, 0.48676763, 0.37150332],
[ 0.25605486, 0.48999782, 0.3690235 ],
[ 0.25492731, 0.4932329 , 0.36644367],
[ 0.2537774 , 0.49647174, 0.36376208],
[ 0.25261249, 0.49971279, 0.36097881],
[ 0.25143921, 0.50295487, 0.35809205],
[ 0.25026483, 0.50619673, 0.35509995],
[ 0.24909736, 0.50943704, 0.35200056],
[ 0.24794283, 0.51267518, 0.34878753],
[ 0.24681094, 0.5159097 , 0.3454584 ],
[ 0.24571493, 0.51913835, 0.34201468],
[ 0.24466674, 0.52235947, 0.33845417],
[ 0.24367796, 0.52557168, 0.33477178],
[ 0.24275902, 0.52877417, 0.33095718],
[ 0.24193144, 0.53196339, 0.32701729],
[ 0.24121292, 0.53513707, 0.32294975],
[ 0.24061591, 0.53829468, 0.31873685],
[ 0.24016739, 0.54143201, 0.3143868 ],
[ 0.23989167, 0.54454583, 0.3098995 ],
[ 0.23980621, 0.54763531, 0.3052503 ],
[ 0.23994718, 0.55069438, 0.30045852],
[ 0.24033897, 0.55372091, 0.29550373],
[ 0.24101659, 0.5567097 , 0.29039154],
[ 0.242013 , 0.55965658, 0.28511189],
[ 0.24336597, 0.56255578, 0.27966796],
[ 0.24511307, 0.56540212, 0.27404701],
[ 0.24729554, 0.56818809, 0.26826528],
[ 0.24995455, 0.57090763, 0.26229943],
[ 0.25313183, 0.57355191, 0.25617055],
[ 0.25686732, 0.57611234, 0.24988551],
[ 0.26120079, 0.57857967, 0.24344791],
[ 0.26616801, 0.58094374, 0.23687306],
[ 0.27179453, 0.58319423, 0.23019404],
[ 0.27809821, 0.58532092, 0.22344485],
[ 0.28508368, 0.5873143 , 0.21667129],
[ 0.29273881, 0.58916631, 0.20993178],
[ 0.3010319 , 0.5908713 , 0.20329682],
[ 0.30991056, 0.5924269 , 0.19684672],
[ 0.31930301, 0.59383467, 0.19066735],
[ 0.32912207, 0.5951003 , 0.18484441],
[ 0.33927145, 0.59623329, 0.17945707],
[ 0.34965323, 0.59724611, 0.17457225],
[ 0.36017498, 0.59815298, 0.17024054],
[ 0.37075539, 0.59896866, 0.16649448],
[ 0.38132758, 0.59970726, 0.16334908],
[ 0.39183913, 0.60038173, 0.1608045 ],
[ 0.40223125, 0.60100791, 0.15885843],
[ 0.41249475, 0.60159175, 0.15747918],
[ 0.42261595, 0.60214034, 0.15663827],
[ 0.43255125, 0.60266853, 0.15631551],
[ 0.44232961, 0.60317358, 0.15646484],
[ 0.45192221, 0.60366715, 0.15705899],
[ 0.46135476, 0.60414659, 0.15805779],
[ 0.47059963, 0.60462352, 0.1594324 ],
[ 0.47969294, 0.60509169, 0.16114641],
[ 0.48861904, 0.60555917, 0.16317152],
[ 0.49738619, 0.60602708, 0.16547858],
[ 0.5060124 , 0.60649316, 0.16804181],
[ 0.51450148, 0.60695916, 0.17083789],
[ 0.52283993, 0.60743236, 0.17384267],
[ 0.53105197, 0.60790769, 0.17703821],
[ 0.53914308, 0.60838578, 0.18040711],
[ 0.54711786, 0.60886742, 0.18393356],
[ 0.55498092, 0.60935321, 0.18760336],
[ 0.5627368 , 0.60984364, 0.19140384],
[ 0.57038905, 0.61033941, 0.19532343],
[ 0.57793913, 0.61084192, 0.19935102],
[ 0.58539646, 0.61134942, 0.20347965],
[ 0.59276492, 0.61186207, 0.20770171],
[ 0.6000482 , 0.61237999, 0.21201055],
[ 0.60724981, 0.61290327, 0.2164004 ],
[ 0.61437307, 0.61343197, 0.22086628],
[ 0.62142114, 0.61396611, 0.22540392],
[ 0.62839694, 0.61450572, 0.23000966],
[ 0.63530325, 0.61505081, 0.23468046],
[ 0.64214263, 0.61560141, 0.23941375],
[ 0.64891741, 0.61615756, 0.24420741],
[ 0.65562676, 0.61672066, 0.24905779],
[ 0.66227569, 0.61728944, 0.25396539],
[ 0.66886611, 0.61786396, 0.2589293 ],
[ 0.67539975, 0.61844427, 0.2639489 ],
[ 0.68187814, 0.61903047, 0.26902384],
[ 0.68830265, 0.61962269, 0.27415403],
[ 0.69467448, 0.62022108, 0.27933959],
[ 0.70099464, 0.62082585, 0.28458084],
[ 0.70726401, 0.62143724, 0.28987828],
[ 0.71348256, 0.62205592, 0.29523192],
[ 0.71964871, 0.6226833 , 0.30064055],
[ 0.7257661 , 0.62331815, 0.30610795],
[ 0.73183497, 0.62396086, 0.3116352 ],
[ 0.7378554 , 0.62461192, 0.31722351],
[ 0.74382734, 0.62527185, 0.32287414],
[ 0.74975057, 0.62594124, 0.32858846],
[ 0.75562355, 0.62662141, 0.33436653],
[ 0.76144581, 0.62731307, 0.34020972],
[ 0.76721835, 0.62801608, 0.34612142],
[ 0.77294038, 0.62873127, 0.35210319],
[ 0.77861095, 0.62945956, 0.35815664],
[ 0.78422899, 0.63020192, 0.36428337],
[ 0.78979252, 0.63095987, 0.37048392],
[ 0.79530062, 0.63173429, 0.37676038],
[ 0.80075261, 0.63252589, 0.38311548],
[ 0.80614684, 0.63333595, 0.3895508 ],
[ 0.81148155, 0.63416582, 0.39606787],
[ 0.81675483, 0.63501695, 0.40266817],
[ 0.82196435, 0.63589105, 0.40935253],
[ 0.8271086 , 0.6367893 , 0.41612348],
[ 0.83218532, 0.63771334, 0.42298234],
[ 0.83719205, 0.63866495, 0.42993032],
[ 0.84212626, 0.63964598, 0.43696849],
[ 0.84698539, 0.64065826, 0.44409811],
[ 0.85176681, 0.64170366, 0.45132049],
[ 0.85646749, 0.64278436, 0.45863614],
[ 0.86108437, 0.64390252, 0.46604562],
[ 0.86561427, 0.64506041, 0.47354929],
[ 0.87005391, 0.64626036, 0.48114736],
[ 0.87440033, 0.6475044 , 0.48884145],
[ 0.8786496 , 0.64879536, 0.49663003],
[ 0.88279806, 0.65013581, 0.50451251],
[ 0.886842 , 0.6515284 , 0.51248812],
[ 0.8907776 , 0.65297579, 0.52055578],
[ 0.89460109, 0.65448064, 0.52871468],
[ 0.89830854, 0.65604567, 0.53696351],
[ 0.90189577, 0.65767392, 0.54529885],
[ 0.90535877, 0.65936821, 0.5537181 ],
[ 0.90869358, 0.66113137, 0.56221825],
[ 0.91189625, 0.66296617, 0.57079586],
[ 0.91496291, 0.6648754 , 0.57944686],
[ 0.91788981, 0.66686181, 0.5881658 ],
[ 0.92067342, 0.66892797, 0.59694738],
[ 0.92331042, 0.67107632, 0.60578593],
[ 0.92579776, 0.67330909, 0.6146753 ],
[ 0.92813267, 0.67562834, 0.62360891],
[ 0.93031328, 0.678036 , 0.63257538],
[ 0.93233787, 0.68053348, 0.64156724],
[ 0.93420501, 0.68312189, 0.65057749],
[ 0.93591386, 0.68580207, 0.65959765],
[ 0.93746411, 0.6885745 , 0.66861913],
[ 0.93885727, 0.69143911, 0.67762793],
[ 0.94009655, 0.69439504, 0.686608 ],
[ 0.94118182, 0.69744174, 0.69555831],
[ 0.94211522, 0.70057808, 0.70447036],
[ 0.94289928, 0.70380263, 0.71333615],
[ 0.94354455, 0.70711146, 0.72212851],
[ 0.94405369, 0.71050244, 0.73084569],
[ 0.9444278 , 0.71397394, 0.73948938],
[ 0.94467085, 0.7175234 , 0.74805426],
[ 0.94480169, 0.72114324, 0.75650653],
[ 0.94482174, 0.72483132, 0.76485204],
[ 0.94472934, 0.72858669, 0.77310077],
[ 0.94453501, 0.73240424, 0.78124012],
[ 0.94426678, 0.73627241, 0.78923231],
[ 0.9439052 , 0.74019659, 0.79711843],
[ 0.9434584 , 0.74417245, 0.80489253],
[ 0.94297022, 0.74818241, 0.81250113],
[ 0.94240599, 0.75223783, 0.82000299],
[ 0.94178596, 0.75632991, 0.82737865],
[ 0.94114205, 0.76044536, 0.83459832],
[ 0.94043604, 0.76459737, 0.84171794],
[ 0.93972281, 0.76876404, 0.84868276],
[ 0.93897849, 0.77295334, 0.85553024],
[ 0.93820719, 0.77716269, 0.86226445],
[ 0.93745099, 0.78137511, 0.86885223],
[ 0.93665014, 0.7856125 , 0.87536218]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| mit |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/sandbox/nonparametric/kdecovclass.py | 5 | 5713 | '''subclassing kde
Author: josef pktd
'''
import numpy as np
import scipy
from scipy import stats
import matplotlib.pylab as plt
class gaussian_kde_set_covariance(stats.gaussian_kde):
'''
from Anne Archibald in mailinglist:
http://www.nabble.com/Width-of-the-gaussian-in-stats.kde.gaussian_kde---td19558924.html#a19558924
'''
def __init__(self, dataset, covariance):
self.covariance = covariance
scipy.stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
class gaussian_kde_covfact(stats.gaussian_kde):
def __init__(self, dataset, covfact = 'scotts'):
self.covfact = covfact
scipy.stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance_(self):
'''not used'''
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
def covariance_factor(self):
if self.covfact in ['sc', 'scotts']:
return self.scotts_factor()
if self.covfact in ['si', 'silverman']:
return self.silverman_factor()
elif self.covfact:
return float(self.covfact)
else:
raise ValueError('covariance factor has to be scotts, silverman or a number')
def reset_covfact(self, covfact):
self.covfact = covfact
self.covariance_factor()
self._compute_covariance()
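# Minimal usage sketch (added for illustration; the helper below is not part
# of the original sandbox module): fit a KDE with a fixed bandwidth factor,
# then switch the same object to Scott's rule via reset_covfact.
def _example_covfact_usage(n=200):
    sample = np.random.randn(n)                        # synthetic 1d sample
    kde = gaussian_kde_covfact(sample, covfact=0.25)   # fixed factor
    grid = np.linspace(-4, 4, 101)
    dens_fixed = kde.evaluate(grid)
    kde.reset_covfact('scotts')                        # data-driven factor
    dens_scotts = kde.evaluate(grid)
    return dens_fixed, dens_scotts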
def plotkde(covfact):
gkde.reset_covfact(covfact)
kdepdf = gkde.evaluate(ind)
plt.figure()
# plot histogram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation - ' + str(gkde.covfact))
plt.legend()
from numpy.testing import assert_array_almost_equal, \
assert_almost_equal, assert_
def test_kde_1d():
np.random.seed(8765678)
n_basesample = 500
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
print xnmean, xnstd
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
xs = np.linspace(-7,7,501)
kdepdf = gkde.evaluate(xs)
normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
print 'MSE', np.sum((kdepdf - normpdf)**2)
print 'max abs error', np.max(np.abs(kdepdf - normpdf))
intervall = xs[1] - xs[0]
assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
#assert_array_almost_equal(kdepdf, normpdf, decimal=2)
print gkde.integrate_gaussian(0.0, 1.0)
print gkde.integrate_box_1d(-np.inf, 0.0)
print gkde.integrate_box_1d(0.0, np.inf)
print gkde.integrate_box_1d(-np.inf, xnmean)
print gkde.integrate_box_1d(xnmean, np.inf)
assert_almost_equal(gkde.integrate_box_1d(xnmean, np.inf), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box_1d(-np.inf, xnmean), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(xnmean, np.inf), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*intervall, decimal=2)
assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
(kdepdf*normpdf).sum()*intervall, decimal=2)
## assert_almost_equal(gkde.integrate_gaussian(0.0, 1.0),
## (kdepdf*normpdf).sum()*intervall, decimal=2)
if __name__ == '__main__':
# generate a sample
n_basesample = 1000
np.random.seed(8765678)
alpha = 0.6 #weight for (prob of) lower distribution
mlow, mhigh = (-3,3) #mean locations for gaussian mixture
xn = np.concatenate([mlow + np.random.randn(alpha * n_basesample),
mhigh + np.random.randn((1-alpha) * n_basesample)])
# get kde for original sample
#gkde = stats.gaussian_kde(xn)
gkde = gaussian_kde_covfact(xn, 0.1)
# evaluate the density function for the kde for some points
ind = np.linspace(-7,7,101)
kdepdf = gkde.evaluate(ind)
plt.figure()
# plot histogram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation')
plt.legend()
gkde = gaussian_kde_covfact(xn, 'scotts')
kdepdf = gkde.evaluate(ind)
plt.figure()
# plot histogram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation')
plt.legend()
#plt.show()
for cv in ['scotts', 'silverman', 0.05, 0.1, 0.5]:
plotkde(cv)
test_kde_1d()
np.random.seed(8765678)
n_basesample = 1000
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
# get kde for original sample
gkde = stats.gaussian_kde(xn)
| apache-2.0 |
abhishekkrthakur/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
MartinDelzant/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
ctuning/ck | ck/repo/module/graph/module.py | 1 | 52220 | #
# Collective Knowledge (various graphs for experiment)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, [email protected], http://fursin.net
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
var_post_subgraph='subgraph'
var_post_cur_subgraph='cur_subgraph'
var_post_tmp_graph_file='graph_tmp_file'
var_post_refresh_graph='refresh_graph'
var_post_reset_graph='reset_graph'
var_post_autorefresh='graph_autorefresh'
var_post_autorefresh_time='graph_autorefresh_time'
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# plot universal graph by flat dimensions
def plot(i):
"""
Input: {
(load_table_from_file) - load table directly from file
or
Select entries or table:
(repo_uoa) or (experiment_repo_uoa) - can be wild cards
(remote_repo_uoa) - if remote access, use this as a remote repo UOA
(module_uoa) or (experiment_module_uoa) - can be wild cards
(data_uoa) or (experiment_data_uoa) - can be wild cards
(repo_uoa_list) - list of repos to search
(module_uoa_list) - list of module to search
(data_uoa_list) - list of data to search
(search_dict) - search dict
(ignore_case) - if 'yes', ignore case when searching
OR
table - experiment table (if drawing from other functions)
(table_info) - point description
(flat_keys_list) - list of flat keys to extract from points into table
(order is important: for example, for plot -> X,Y,Z)
(flat_keys_list_separate_graphs) - [ [keys], [keys], ...] - several graphs ...
(sort_index) - (int) dimension to sort
(labels_for_separate_graphs) - list of labels for separate graphs
(flat_keys_index) - add all flat keys starting from this index
(flat_keys_index_end) - add all flat keys ending with this index (default #min)
(customize_dots) - if 'yes' and MPL engine, customize color and size from table_info
(out_to_file) - save picture or html to file, if supported
(will be preserved and not replotted - useful to have a copy of an original image
when replotting graphs in interactive papers)
(out_repo_uoa) - repo uoa where to save file (when reproducing graphs for interactive articles)
(out_module_uoa) - module uoa where to save file (when reproducing graphs for interactive articles)
(out_data_uoa) - data uoa where to save file (when reproducing graphs for interactive articles)
(out_id) - graph ID in the created graph entry
(out_common_meta) - graph common meta in the created graph entry
(out_graph_extra_meta) - graph extra meta (parameters and description)
(save_table_to_json_file) - save table to json file
(save_info_table_to_json_file) - save info table (mtable) to json file
(save_table_to_csv_file) - save table to csv file (need keys)
(save_to_html) - if interactive or html-based graph, save to html
(save_to_style) - if interactive or html-based graph, save to style (if needed)
(display_x_error_bar) - if 'yes', display error bar on X axis (using next dim)
(display_y_error_bar) - if 'yes', display error bar on Y axis (using next dim)
(display_z_error_bar) - if 'yes', display error bar on Z axis (using next dim)
Graphical parameters:
plot_type - mpl_2d_scatter
point_style - dict, setting point style for each separate graph {"0", "1", etc}
x_ticks_period - (int) for bar graphs, show the x tick value only every N-th bar
xmin
xmax
ymin
ymax
bound_lines - if 'yes', plot lines bounding all points ...
bound_style - ':' by default
bound_color - 'r' by default
skip_colorbar - if 'yes', do not show colorbar in heatmaps
colorbar_pad - (default = 0.15) - pad colorbar
h_lines - list of Y values at which to draw horizontal lines
h_lines_style
h_lines_color
v_lines - list of X values at which to draw vertical lines
v_lines_style
v_lines_color
If density graph:
(bins) - number of bins (int, default = 100)
(cov_factor) - float covariance factor
d3_div - div ID (ck_interactive). "body" if empty
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
(html) - html, if HTML generator such as d3
(style) - style for html, if HTML generator such as d3
}
"""
import os
import json
o=i.get('out','')
pst=i.get('point_style',{})
otf=i.get('out_to_file','')
otf_ruoa=i.get('out_repo_uoa','')
otf_muoa=i.get('out_module_uoa','')
otf_duoa=i.get('out_data_uoa','')
xtp=i.get('x_ticks_period','')
if xtp=='' or xtp==0: xtp=1
if xtp!='': xtp=int(xtp)
lsg=i.get('labels_for_separate_graphs',[])
stjf=i.get('save_table_to_json_file','')
sitjf=i.get('save_info_table_to_json_file','')
stcf=i.get('save_table_to_csv_file','')
table=i.get('table',[])
mtable=i.get('table_info', [])
rk=i.get('real_keys',[])
cdots=i.get('customize_dots','')
ltfj=i.get('load_table_from_file','')
if ltfj!='':
rx=ck.load_json_file({'json_file':ltfj})
if rx['return']>0: return rx
table=rx['dict']
# Check if table already there
if len(table)==0:
# Get table from entries
tmp_a=i.get('action','')
tmp_mu=i.get('module_uoa','')
i['action']='get'
i['module_uoa']=cfg['module_deps']['experiment']
if i.get('remote_repo_uoa','')!='':
i['repo_uoa']=i['remote_repo_uoa']
del(i['remote_repo_uoa'])
if 'out' in i: del(i['out'])
r=ck.access(i)
if r['return']>0: return r
table=r['table']
mtable=r.get('mtable',{})
rk=r['real_keys']
merged_meta=r.get('merged_meta',{})
pifs=r.get('plot_info_from_scenario',{})
if len(pifs)>0:
import copy
ii=copy.deepcopy(i)
i=pifs
i.update(ii)
i['action']=tmp_a
i['module_uoa']=tmp_mu
else:
# If sort/substitute
si=i.get('sort_index','')
if si!='':
rx=ck.access({'action':'sort_table',
'module_uoa':cfg['module_deps']['experiment'],
'table':table,
'sort_index':si})
if rx['return']>0: return rx
table=rx['table']
# Substitute all X with a loop (usually to sort Y and compare with predictions in scatter graphs, etc)
if i.get('substitute_x_with_loop','')=='yes':
rx=ck.access({'action':'substitute_x_with_loop',
'module_uoa':cfg['module_deps']['experiment'],
'table':table})
if rx['return']>0: return rx
table=rx['table']
if len(table)==0:
return {'return':1, 'error':'no points found'}
# Check if out to module
pp=''
if otf_duoa!='':
import os
if otf_muoa=='': otf_muoa=work['self_module_uid']
# Check extra params
o_id=os.path.splitext(os.path.basename(otf))[0]
o_cm=i.get('out_common_meta','')
o_gem=i.get('out_graph_extra_meta',{})
# Try to load first
ddd={}
xid=-1
ii={'action':'load',
'module_uoa':otf_muoa,
'repo_uoa':otf_ruoa,
'data_uoa':otf_duoa}
rx=ck.access(ii)
if rx['return']==0:
ddd=rx['dict']
dddx=ddd.get('graphs',[])
if o_id!='':
for q in range(0, len(dddx)):
if dddx[q].get('id','')==o_id:
xid=q
break
# Updating
if 'graphs' not in ddd:
ddd['graphs']=[]
ddd.update(o_cm)
# Prepare graph params
import copy
ii=copy.deepcopy(i)
for q in cfg['remove_keys_for_interactive_graphs']:
if q in ii: del(ii[q])
dddg={'params':ii}
dddg.update(o_gem)
if o_id!='':
dddg['id']=o_id
else:
dddg['id']='default'
if len(i.get('out_graph_meta',{}))>0:
dddg.update(i['out_graph_meta'])
if xid!=-1:
ddd['graphs'][xid]=dddg
else:
ddd['graphs'].append(dddg)
# Try to update this entry to be sure that we can record there, and get path
ii={'action':'update',
'module_uoa':otf_muoa,
'repo_uoa':otf_ruoa,
'data_uoa':otf_duoa,
'dict':ddd,
'substitute':'yes',
'ignore_update':'yes'}
rx=ck.access(ii)
if rx['return']>0: return rx
pp=rx['path']
# Save table to JSON file, if needed
if stjf!='':
if pp!='':
ppx=os.path.join(pp, stjf)
else:
ppx=stjf
rx=ck.save_json_to_file({'json_file':ppx, 'dict':table})
if rx['return']>0: return rx
# Save info table to JSON file, if needed
if sitjf!='':
if pp!='':
ppx=os.path.join(pp, sitjf)
else:
ppx=sitjf
rx=ck.save_json_to_file({'json_file':ppx, 'dict':mtable})
if rx['return']>0: return rx
# Save table to CSV file, if needed
if stcf!='':
if pp!='':
ppx=os.path.join(pp, stcf)
else:
ppx=stcf
ii={'action':'convert_table_to_csv',
'module_uoa':cfg['module_deps']['experiment'],
'table':table,
'keys':rk,
'merge_multi_tables':'yes',
'file_name':ppx}
rx=ck.access(ii)
if rx['return']>0: return rx
# Prepare libraries
pt=i.get('plot_type','')
html=''
style=''
hlines=i.get('h_lines',[])
vlines=i.get('v_lines',[])
# Check if display error bars
xerr=i.get('display_x_error_bar','')
yerr=i.get('display_y_error_bar','')
zerr=i.get('display_z_error_bar','')
xerr2=i.get('display_x_error_bar2','') # type 2
yerr2=i.get('display_y_error_bar2','')
zerr2=i.get('display_z_error_bar2','')
# Find min/max in all data and all dimensions / per sub-graph
tmin=[]
tmax=[]
stmin={}
stmax={}
for g in table:
gt=table[g]
stmin[g]=[]
stmax[g]=[]
mgt=[]
if g in mtable:
mgt=mtable[g]
xpst=pst.get(g,{})
remove_permanent=False
if xpst.get('remove_permanent','')=='yes':
remove_permanent=True
leave_only_permanent=False
if xpst.get('leave_only_permanent','')=='yes':
leave_only_permanent=True
ngt=[]
# for k in gt:
for uindex in range(0,len(gt)):
k=gt[uindex]
if (remove_permanent or leave_only_permanent) and uindex<len(mgt):
mu=mgt[uindex]
if remove_permanent and mu.get('permanent','')=='yes':
continue
if leave_only_permanent and mu.get('permanent','')!='yes':
continue
ngt.append(k)
if xpst.get('skip_from_dims','')=='yes':
continue
for d in range(0, len(k)):
v=k[d]
if len(tmin)<=d:
tmin.append(v)
tmax.append(v)
else:
if v!=None and v<tmin[d]: tmin[d]=v
if v!=None and v>tmax[d]: tmax[d]=v
if len(stmin[g])<=d and v!=None:
stmin[g].append(v)
stmax[g].append(v)
else:
if v!=None and v<stmin[g][d]: stmin[g][d]=v
if v!=None and v>stmax[g][d]: stmax[g][d]=v
table[g]=ngt
####################################################################### MPL ###
if pt.startswith('mpl_'):
import matplotlib as mpl
if ck.cfg.get('use_internal_engine_for_plotting','')=='yes':
mpl.use('agg') # if XWindows is not installed, use internal engine
elif os.environ.get('CK_MPL_BACKEND','')!='':
mpl.use(os.environ['CK_MPL_BACKEND'])
import matplotlib.pyplot as plt
fsize=int(i.get('font_size','10'))
fweight=i.get('font_weight','normal')
ffamily=i.get('font_family','arial')
# Set font
font=i.get('font',{})
if len(font)==0:
font = {'family':ffamily,
'weight':fweight,
'size': fsize}
plt.rc('font', **font)
# Configure graph
gs=cfg['mpl_point_styles']
sizex=i.get('mpl_image_size_x','')
if sizex=='': sizex='9'
sizey=i.get('mpl_image_size_y','')
if sizey=='': sizey='5'
dpi=i.get('mpl_image_dpi','')
if dpi=='': dpi='100'
if sizex!='' and sizey!='' and dpi!='':
fig=plt.figure(figsize=(int(sizex),int(sizey)))
else:
fig=plt.figure()
if i.get('plot_grid','')=='yes':
plt.grid(True)
bl=i.get('bound_lines','')
if pt=='mpl_3d_scatter' or pt=='mpl_3d_trisurf':
from mpl_toolkits.mplot3d import Axes3D
sp=fig.add_subplot(111, projection='3d')
else:
sp=fig.add_subplot(111)
if i.get('xscale_log','')=='yes': sp.set_xscale('log')
if i.get('yscale_log','')=='yes': sp.set_yscale('log')
if i.get('zscale_log','')=='yes': sp.set_zscale('log')
# If density or heatmap, find min and max for both graphs:
if pt=='mpl_1d_density' or pt=='mpl_1d_histogram' or pt=='mpl_2d_heatmap' or pt=='mpl_3d_scatter' or pt=='mpl_3d_trisurf':
index=0
if pt=='mpl_2d_heatmap': index=2
dmean=0.0
start=True
dmin=0.0
dmax=0.0
it=0
dt=0
for g in table:
gt=table[g]
for k in gt:
v=k[index]
if v!=None and v!='':
if start:
dmin=v
start=False
else:
dmin=min(dmin, v)
if start:
dmax=v
start=False
else:
dmax=max(dmax, v)
it+=1
dt+=v
if it!=0: dmean=dt/it
# If heatmap, prepare colorbar
if pt=='mpl_2d_heatmap' or pt=='mpl_3d_trisurf':
from matplotlib import cm
if len(i.get('color_dict',{}))>0:
xcmap = mpl.colors.LinearSegmentedColormap('my_colormap', i['color_dict'], 1024)
else:
xcmap = plt.cm.get_cmap('coolwarm')
if i.get('shifted_colormap','')=='yes':
r=ck.load_module_from_path({'path':work['path'], 'module_code_name':'module_shifted_colormap', 'skip_init':'yes'})
if r['return']>0: return r
scm=r['code']
xx_start=dmin
if i.get('shifted_colormap_start','')!='':
xx_start=i['shifted_colormap_start']
xx_stop=dmax
if i.get('shifted_colormap_stop','')!='':
xx_stop=i['shifted_colormap_stop']
xx_mid=1.0
if i.get('shifted_colormap_mid','')!='':
xx_mid=i['shifted_colormap_mid']
xcmap = scm.shiftedColorMap(xcmap, start=xx_start, stop=xx_stop, midpoint=xx_mid, name='shifted')
# Check forced min/max for different axis
xmin=i.get('xmin','')
xmax=i.get('xmax','')
ymin=i.get('ymin','')
ymax=i.get('ymax','')
zmin=i.get('zmin','')
zmax=i.get('zmax','')
if xmin!='':
sp.set_xlim(left=float(xmin))
if xmax!='':
sp.set_xlim(right=float(xmax))
if ymin!='':
sp.set_ylim(bottom=float(ymin))
if ymax!='':
sp.set_ylim(top=float(ymax))
if zmin!='':
sp.set_zlim(bottom=float(zmin))
if zmax!='':
sp.set_zlim(top=float(zmax))
# Check if invert axis
if i.get('invert_x_axis','')=='yes': plt.gca().invert_xaxis()
if i.get('invert_y_axis','')=='yes': plt.gca().invert_yaxis()
if i.get('invert_z_axis','')=='yes': plt.gca().invert_zaxis()
if pt=='mpl_2d_bars' or pt=='mpl_2d_lines':
ind=[]
gt=table['0']
xt=0
for q in gt:
xt+=1
if xt==xtp:
v=q[0]
xt=0
else:
v=0
ind.append(v)
sp.set_xticks(ind)
sp.set_xticklabels(ind, rotation=-20)
width=0.9/len(table)
# Iterate over separate graphs and add points
s=0
for g in sorted(table, key=int):
gt=table[g]
mgt=[]
if g in mtable:
mgt=mtable[g]
lbl=''
if s<len(lsg): lbl=lsg[s]
xpst=pst.get(g,{})
elw=int(xpst.get('elinewidth',0))
xfmt=xpst.get('fmt','')
xcapsize=xpst.get('capsize','')
if xcapsize=='' or xcapsize==None: xcapsize=None
else: xcapsize=int(xcapsize)
cl=xpst.get('color','')
if cl=='': cl=gs[s]['color']
sz=xpst.get('size','')
if sz=='': sz=gs[s]['size']
connect_lines=xpst.get('connect_lines','')
mrk=xpst.get('marker','')
if mrk=='': mrk=gs[s]['marker']
lst=xpst.get('line_style','')
if lst=='': lst=gs[s].get('line_style', '-')
heatmap=None
if pt=='mpl_2d_scatter' or pt=='mpl_2d_bars' or pt=='mpl_2d_lines':
mx=[]
mxerr=[]
my=[]
myerr=[]
mcolor=[]
msize=[]
# for u in gt:
for uindex in range(0,len(gt)):
u=gt[uindex]
iu=0
# Check if extra info (color and size)
minfo={}
if uindex<len(mgt):
minfo=mgt[uindex]
xcl=cl
if minfo.get('color','')!='':
xcl=minfo['color']
mcolor.append(xcl)
xsz=sz
if minfo.get('size','')!='':
xsz=minfo['size']
msize.append(int(xsz))
# Check if no None
partial=False
for q in u:
if q==None:
partial=True
break
if not partial:
mx.append(u[iu])
iu+=1
if xerr=='yes':
mxerr.append(u[iu])
iu+=1
my.append(u[iu])
iu+=1
if yerr=='yes':
myerr.append(u[iu])
iu+=1
if pt=='mpl_2d_bars':
mx1=[]
# mx2=[]
# names={}
# iq=0
for q in mx:
# if type(q)!=int and type(q)!=float:
# if q in names: q=names[q]
# else:
# names[q]=iq
# q=iq
# iq+=1
# mx2.append(str(q))
mx1.append(q+width*s)
if yerr=='yes':
sp.bar(mx1, my, width=width, edgecolor=cl, facecolor=cl, align='center', yerr=myerr, label=lbl) # , error_kw=dict(lw=2))
else:
sp.bar(mx1, my, width=width, edgecolor=cl, facecolor=cl, align='center', label=lbl)
elif pt=='mpl_2d_lines':
if yerr=='yes':
sp.errorbar(mx, my, yerr=myerr, ls='none', c=cl, elinewidth=elw)
sp.plot(mx, my, c=cl, label=lbl)
else:
draw_scatter=False
if xerr=='yes' and yerr=='yes':
if cdots=='yes':
sp.errorbar(mx, my, xerr=mxerr, yerr=myerr, ls='none', c=mcolor, elinewidth=elw, label=lbl, fmt=xfmt)
else:
sp.errorbar(mx, my, xerr=mxerr, yerr=myerr, ls='none', c=cl, elinewidth=elw, label=lbl, fmt=xfmt)
elif xerr=='yes' and yerr!='yes':
if cdots=='yes':
sp.errorbar(mx, my, xerr=mxerr, ls='none', c=mcolor, elinewidth=elw, label=lbl, fmt=xfmt)
else:
sp.errorbar(mx, my, xerr=mxerr, ls='none', c=cl, elinewidth=elw, label=lbl, fmt=xfmt)
elif yerr=='yes' and xerr!='yes':
if cdots=='yes':
sp.errorbar(mx, my, yerr=myerr, ls='none', c=mcolor, elinewidth=elw, label=lbl, fmt=xfmt, capsize=xcapsize)
else:
sp.errorbar(mx, my, yerr=myerr, ls='none', c=cl, elinewidth=elw, label=lbl, fmt=xfmt, capsize=xcapsize)
else:
draw_scatter=True
if draw_scatter or i.get('force_center_dot','')=='yes':
if cdots=='yes':
sp.scatter(mx, my, s=msize, edgecolor=mcolor, c=mcolor, marker=mrk, label=lbl)
else:
sp.scatter(mx, my, s=int(sz), edgecolor=cl, c=cl, marker=mrk, label=lbl)
if connect_lines=='yes':
if cdots=='yes':
sp.plot(mx, my, c=mcolor, label=lbl)
else:
sp.plot(mx, my, c=cl, label=lbl)
if xpst.get('frontier','')=='yes':
# not optimal solution, but should work (need to sort to draw proper frontier)
a=[]
for q in range(0, len(mx)):
a.append([mx[q],my[q]])
b=sorted(a, key=lambda k: k[0])
if xpst.get('reuse_dims','')=='yes':
mx=[b[0][0]]
my=[tmax[1]]
elif xpst.get('dims_from_this_graph','')=='yes':
mx=[stmin[g][0]]
my=[stmax[g][1]]
else:
mx=[tmin[0]]
my=[tmax[1]]
for j in b:
mx.append(j[0])
my.append(j[1])
if xpst.get('reuse_dims','')=='yes':
mx.append(tmax[0])
my.append(b[-1][1])
elif xpst.get('dims_from_this_graph','')=='yes':
mx.append(stmax[g][0])
my.append(stmin[g][1])
else:
mx.append(tmax[0])
my.append(tmin[1])
sp.plot(mx, my, c=cl, linestyle=lst, label=lbl)
elif pt=='mpl_1d_density' or pt=='mpl_1d_histogram':
if not start: # I.e. we got non empty points
xbins=i.get('bins', 100)
xcov_factor=i.get('cov_factor', '')
mx=[]
for u in gt:
mx.append(u[0])
ii={'action':'analyze',
'min':dmin,
'max':dmax,
'module_uoa':cfg['module_deps']['math.variation'],
'bins':xbins,
'cov_factor':xcov_factor,
'characteristics_table':mx}
r=ck.access(ii)
if r['return']>0: return r
xs=r['xlist']
dxs=r['ylist']
pxs=r['xlist2s']
dpxs=r['ylist2s']
if pt=='mpl_1d_density':
sp.plot(xs,dxs, label=lbl)
sp.plot(pxs, dpxs, 'x', mec='r', mew=2, ms=8) #, mfc=None, mec='r', mew=2, ms=8)
sp.plot([dmean,dmean],[0,dpxs[0]],'g--',lw=2)
else:
plt.hist(mx, bins=xbins, normed=True, label=lbl)
elif pt=='mpl_2d_heatmap' or pt=='mpl_3d_scatter' or pt=='mpl_3d_trisurf':
mx=[]
mxerr=[]
my=[]
myerr=[]
mz=[]
mzerr=[]
for u in gt:
iu=0
# Check if no None
partial=False
for q in u:
if q==None:
partial=True
break
if not partial:
mx.append(u[iu])
iu+=1
if xerr=='yes':
mxerr.append(u[iu])
iu+=1
my.append(u[iu])
iu+=1
if yerr=='yes':
myerr.append(u[iu])
iu+=1
mz.append(u[iu])
iu+=1
if zerr=='yes':
mzerr.append(u[iu])
iu+=1
if pt=='mpl_2d_heatmap':
heatmap=sp.scatter(mx, my, c=mz, s=int(sz), marker=mrk, lw=elw, cmap=xcmap)
elif pt=='mpl_3d_scatter':
heatmap=sp.scatter(mx,my,mz, c=cl, s=int(sz), marker=mrk, lw=elw)
elif pt=='mpl_3d_trisurf':
# heatmap=sp.plot_trisurf(mx,my,mz,cmap=cm.coolwarm, lw=elw)
heatmap=sp.plot_trisurf(mx,my,mz,cmap=xcmap, lw=elw)
s+=1
if s>=len(gs):s=0
# If heatmap, finish colors
if (pt=='mpl_2d_heatmap' or pt=='mpl_3d_trisurf') and i.get('skip_colorbar','')!='yes':
colorbar_pad=i.get('colorbar_pad','')
if colorbar_pad=='': colorbar_pad=0.15
colorbar_pad=float(colorbar_pad)
plt.colorbar(heatmap, orientation=xpst.get('colorbar_orietation','horizontal'), label=xpst.get('colorbar_label',''), pad=colorbar_pad)
# For lines
dxmin=tmin[0]
if xmin!='': dxmin=float(xmin)
dxmax=tmax[0]
if xmax!='': dxmax=float(xmax)
dymin=tmin[1]
if ymin!='': dymin=float(ymin)
dymax=tmax[1]
if ymax!='': dymax=float(ymax)
# If bounds
if bl=='yes':
xbs=i.get('bound_style',':')
xbc=i.get('bound_color','r')
sp.plot([tmin[0],tmax[0]],[tmin[1],tmin[1]], linestyle=xbs, c=xbc)
sp.plot([tmin[0],tmin[0]],[tmin[1],tmax[1]], linestyle=xbs, c=xbc)
# If horizontal lines
if len(hlines)>0:
xbs=i.get('h_lines_style','--')
xbc=i.get('h_lines_color','r')
for q in hlines:
sp.plot([dxmin,dxmax],[q,q], linestyle=xbs, c=xbc)
# If vertical lines
if len(vlines)>0:
xbs=i.get('v_lines_style','--')
xbc=i.get('v_lines_color','r')
for q in vlines:
sp.plot([q,q],[dymin,dymax], linestyle=xbs, c=xbc)
# Checking scaling
if i.get('x_ticks_scale','')!='':
xticks = sp.get_xticks()/float(i['x_ticks_scale'])
sp.set_xticklabels(xticks)
if i.get('y_ticks_scale','')!='':
yticks = sp.get_yticks()/float(i['y_ticks_scale'])
sp.set_yticklabels(yticks)
if i.get('z_ticks_scale','')!='':
zticks = sp.get_zticks()/float(i['z_ticks_scale'])
sp.set_zticklabels(zticks)
# Set axes labels
xlab=i.get('axis_x_labels',[])
if len(xlab)>0:
ind=[]
qq=0
for q in xlab:
ind.append(qq)
qq+=1
sp.set_xticks(ind)
xrot=i.get('axis_x_rotation','')
if xrot=='': sp.set_xticklabels(xlab)
else: sp.set_xticklabels(xlab, rotation=xrot)
ylab=i.get('axis_y_labels',[])
if len(ylab)>0:
sp.set_yticks(ylab)
sp.set_yticklabels(ylab)
# Set axes names
axd=i.get('axis_x_desc','')
if axd!='': plt.xlabel(axd)
ayd=i.get('axis_y_desc','')
if ayd!='': plt.ylabel(ayd)
atitle=i.get('title','')
if atitle!='': plt.title(atitle)
# handles, labels = plt.get_legend_handles_labels()
plt.legend() #handles, labels)
try:
plt.tight_layout()
except Exception:
pass
if otf=='':
plt.show()
else:
if pp!='':
ppx=os.path.join(pp, otf)
else:
ppx=otf
plt.savefig(ppx)
####################################################################### D3 ###
elif pt.startswith('d3_'):
# Try to load template
ppx=os.path.join(work['path'],'templates',pt+'.html')
if not os.path.isfile(ppx):
return {'return':1, 'error':'template for this graph is not found'}
rx=ck.load_text_file({'text_file':ppx})
if rx['return']>0: return rx
html=rx['string']
# Check if style is there (optional)
ppx=os.path.join(work['path'],'templates',pt+'.style')
if os.path.isfile(ppx):
rx=ck.load_text_file({'text_file':ppx})
if rx['return']>0: return rx
style=rx['string']
# Convert data table into JSON
rx=ck.dumps_json({'dict':table})
if rx['return']>0: return rx
stable=rx['string']
# Convert info table into JSON
rx=ck.dumps_json({'dict':mtable})
if rx['return']>0: return rx
smtable=rx['string']
# Convert point styles into JSON
rx=ck.dumps_json({'dict':pst})
if rx['return']>0: return rx
spst=rx['string']
html=html.replace('$#x_ticks_period#$',str(xtp))
html=html.replace('$#display_x_error_bar#$',xerr)
html=html.replace('$#display_y_error_bar#$',yerr)
html=html.replace('$#display_z_error_bar#$',zerr)
html=html.replace('$#display_x_error_bar2#$',xerr2)
html=html.replace('$#display_y_error_bar2#$',yerr2)
html=html.replace('$#display_z_error_bar2#$',zerr2)
html=html.replace('$#cm_info_json#$',smtable)
html=html.replace('$#cm_point_style_json#$',spst)
html=html.replace('$#cm_data_json#$',stable)
html=html.replace('$#cm_info_json#$',smtable)
html=html.replace('$#cm_point_style_json#$',spst)
html=html.replace('$#h_lines#$',json.dumps(hlines))
html=html.replace('$#v_lines#$',json.dumps(vlines))
# Set axes names
axd=i.get('axis_x_desc','')
html=html.replace('$#axis_x_desc#$', axd)
ayd=i.get('axis_y_desc','')
html=html.replace('$#axis_y_desc#$', ayd)
size_x=i.get('image_width','')
if size_x=='': size_x=600
html=html.replace('$#ck_image_width#$', str(size_x))
size_y=i.get('image_height','')
if size_y=='': size_y=400
html=html.replace('$#ck_image_height#$', str(size_y))
xmin=i.get('xmin','')
html=html.replace('$#ck_xmin#$', str(xmin))
xmax=i.get('xmax','')
html=html.replace('$#ck_xmax#$', str(xmax))
ymin=i.get('ymin','')
html=html.replace('$#ck_ymin#$', str(ymin))
ymax=i.get('ymax','')
html=html.replace('$#ck_ymax#$', str(ymax))
# Save html to file (do not hardwire URLs)
x=i.get('out_to_file','')
if x!='':
if pp!='':
ppx=os.path.join(pp, x)
else:
ppx=x
rx=ck.save_text_file({'text_file':ppx, 'string':html})
if rx['return']>0: return rx
# Save style to file, if needed
x=i.get('save_to_style','')
if x!='':
if pp!='':
ppx=os.path.join(pp, x)
else:
ppx=x
rx=ck.save_text_file({'text_file':ppx, 'string':style})
if rx['return']>0: return rx
# Update URLs if needed (for example, to load .js files from CK repo)
url0=i.get('wfe_url','')
if url0=='': url0=ck.cfg.get('wfe_url_prefix','')
html=html.replace('$#ck_root_url#$', url0)
# Save working html locally to visualize without CK
y=i.get('d3_div','')
y1=''
y2=''
if y=='':
y='body'
else:
y1='<div id="'+y+'">\n\n'
y2='\n</div>\n'
y='div#'+y
html=html.replace('$#ck_where#$',y)
if i.get('save_to_html','')!='':
x='<html>\n\n<style>\n'+style+'</style>\n\n'+'<body>\n\n'+y1+html+y2+'\n\n</body>\n</html>\n'
x=x.replace('$#ck_where#$',y)
rx=ck.save_text_file({'text_file':i['save_to_html'], 'string':x})
if rx['return']>0: return rx
else:
return {'return':1, 'error':'this type of plot ('+pt+') is not supported'}
return {'return':0, 'html':html, 'style':style}
##############################################################################
# Continuously updated plot
def continuous_plot(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
for q in range(0, 1000):
r=plot(i)
if r['return']>0: return r
x=ck.inp({'text':'Press any key'})
return {'return':0}
##############################################################################
# view entry as html
def html_viewer(i):
"""
Input: {
data_uoa
url_base
url_pull
url_cid
(subgraph)
url_pull_tmp
tmp_data_uoa
url_wiki
html_share
form_name - current form name
(all_params)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
h=''
st=''
raw='no'
top='no'
duoa=i['data_uoa']
burl=i['url_base']
purl=i['url_pull']
wurl=i.get('url_wiki','')
url_cid=i.get('url_cid','')
tpurl=i['url_pull_tmp']
tpuoa=i['tmp_data_uoa']
ap=i.get('all_params',{})
ruoa=ap.get('ck_top_repo','')
muoa=ap.get('ck_top_module','')
if muoa=='': muoa=work['self_module_uid']
cparams=ap.get('graph_params','') # current graph params
hshare=i.get('html_share','')
itype='png'
# Check autorefresh
ar=ap.get(var_post_autorefresh,'')
if ar=='on':
ap[var_post_refresh_graph]='yes'
form_name=i['form_name']
form_submit='document.'+form_name+'.submit();'
art=ap.get(var_post_autorefresh_time,'')
iart=5
if art!='':
try:
iart=int(art)
except ValueError:
iart=5
if ar=='on':
h+='\n'
h+='<script language="javascript">\n'
h+=' <!--\n'
h+=' setTimeout(\''+form_submit+'\','+str(iart*1000)+');\n'
h+=' //-->\n'
h+='</script>\n'
h+='\n'
# Set replotting
jj={'action':'create_input',
'module_uoa':cfg['module_deps']['wfe'],
'type':'hidden',
'name': var_post_refresh_graph,
'value':'yes'}
rx=ck.access(jj)
if rx['return']>0: return rx
h+=rx['html']+'\n'
if duoa!='':
# Load entry
rx=ck.access({'action':'load',
'module_uoa':muoa,
'data_uoa':duoa})
if rx['return']>0: return rx
pp=rx['path']
dd=rx['dict']
duid=rx['data_uid']
name=dd.get('name','')
gsr=dd.get('get_shared_repo','')
sruoa=dd.get('scripts_repo_uoa','')
smuoa=dd.get('scripts_module_uoa','')
sduoa=dd.get('scripts_data_uoa','')
h+=' <span id="ck_entries1a">'+name+'</span><br>\n'
h+=' <div id="ck_entries_space4"></div>\n'
graphs=dd.get('graphs',[])
# If more than one subgraph, prepare selector
hsb=''
igraph=0
cgraph=0
x=ap.get(var_post_cur_subgraph,'')
try:
cgraph=int(x)
except ValueError:
cgraph=0
# sgraph=i.get(var_post_subgraph,'')
# if sgraph=='':
sgraph=ap.get(var_post_subgraph,'')
if sgraph=='': sgraph=i.get('subgraph','')
if len(graphs)>1:
dx=[]
jgraph=0
for q in graphs:
vid=q.get('id','')
if sgraph=='': sgraph=vid
if vid==sgraph:
igraph=jgraph
x=q.get('name','')
if x=='': x=vid
dx.append({'name':vid, 'value':vid})
jgraph+=1
jj={'action':'create_selector',
'module_uoa':cfg['module_deps']['wfe'],
'name': var_post_subgraph,
'onchange':form_submit,
'data':dx,
'selected_value':sgraph}
rx=ck.access(jj)
if rx['return']>0: return rx
hsb=rx['html']+'\n'
if igraph!=cgraph:
ap[var_post_reset_graph]='yes'
cgraph=igraph
# Save current subgraph to detect change and reset ...
jj={'action':'create_input',
'module_uoa':cfg['module_deps']['wfe'],
'type':'hidden',
'name': var_post_cur_subgraph,
'value':str(cgraph)}
rx=ck.access(jj)
if rx['return']>0: return rx
h+=rx['html']+'\n'
# Visualize
gid=''
if igraph<len(graphs):
g=graphs[igraph]
output=g.get('output','')
gid=g.get('id','')
if gid!='':
# Get graph params
if g.get('notes','')!='':
h+='<i>'+g['notes']+'</i>'
pjson=os.path.join(pp, gid+'.json')
if not os.path.isfile(pjson): pjson=''
pcsv=os.path.join(pp, gid+'.csv')
if not os.path.isfile(pcsv): pcsv=''
h+='<div id="ck_entries_space4"></div>\n'
if hshare!='':
h+='<div id="ck_entries_space4"></div>\n'
h+=hshare
h+=' <div id="ck_entries_space4"></div>\n'
h+='<div style="text-align: right;">'
if wurl!='':
h+='[ <a href="'+wurl+'">Discussion wiki (comments, reproducibility, etc.)</a> ]'
h+='</div>\n'
# h+=' <hr class="ck_hr">\n'
if hsb!='':
h+=' <div id="ck_entries_space4"></div>\n'
h+='<center>Select subgraph: '+hsb+'</center>\n'
# h+=' <hr class="ck_hr">\n'
# Check if interactive (html + style already prepared)
if output=='html':
image=gid+'.html'
else:
image=gid+'.'+itype
params=g.get('params',{})
problem_converting_json=''
if var_post_reset_graph not in ap and cparams!='':
rx=ck.convert_json_str_to_dict({'str':cparams, 'skip_quote_replacement':'yes'})
if rx['return']>0:
problem_converting_json=rx['error']
else:
params=rx['dict']
rx=ck.dumps_json({'dict':params, 'sort_keys':'yes'})
if rx['return']>0: return rx
jparams=rx['string']
# Check if need to regenerate
problem=''
image_orig=image
himage=''
if var_post_refresh_graph in ap:
image_orig=''
import copy
ii=copy.deepcopy(params)
ii['action']='plot'
ii['module_uoa']=work['self_module_uoa']
image=ap.get(var_post_tmp_graph_file,'')
if image=='':
rx=ck.gen_tmp_file({'prefix':'tmp-', 'suffix':'.'+itype, 'remove_dir':'yes'})
if rx['return']>0: return rx
image=rx['file_name']
ii['out_to_file']=image
# Preset current entry params
jj={'action':'create_input',
'module_uoa':cfg['module_deps']['wfe'],
'type':'hidden',
'name': var_post_tmp_graph_file,
'value':image}
rx=ck.access(jj)
if rx['return']>0: return rx
h+=rx['html']+'\n'
if ck.cfg.get('graph_tmp_repo_uoa','')!='':
ii['out_repo_uoa']=ck.cfg['graph_tmp_repo_uoa']
ii['out_module_uoa']='tmp'
ii['out_data_uoa']=tpuoa
# (save_table_to_json_file) - save table to json file
rx=ck.access(ii)
if rx['return']>0:
problem=rx['error']
purl=tpurl
# Prepare html
size_x=params.get('size_x','')
size_y=params.get('size_y','')
h+=' <table border="0" cellpadding="3" width="100%">\n'
h+=' <tr>\n'
extra=''
if size_x!='': extra+='width="'+str(size_x)+'" '
h+=' <td valign="top" '+extra+'>\n'
h+=' <div id="ck_entries">\n'
h+=' <b><small>Graph:</small></b>\n'
if problem!='':
h+='<br><br><span style="color:red;"><i>Problem: '+problem+'!</i></span><br>\n'
else:
if output=='html' and image!='':
# Check if style exists
pstyle=os.path.join(pp, gid+'.style')
if os.path.isfile(pstyle):
rx=ck.load_text_file({'text_file':pstyle})
if rx['return']>0: return rx
st=rx['string']
# Generate UID
rx=ck.gen_uid({})
if rx['return']>0: return rx
uid=rx['data_uid']
div_with_uid='ck_interactive_'+uid
h+='<div id="'+div_with_uid+'">\n'
z=muoa+':'+duoa
if var_post_refresh_graph in ap:
z=url_cid
import os
image_st=os.path.splitext(image)[0]+'.style'
h+='$#ck_include_start#${"cid":"'+z+'", "where":"div#'+div_with_uid+'", "html":"'+image+'", "style":"'+image_st+'"}$#ck_include_stop#$\n'
h+='</div>\n'
else:
if image!='':
if size_y!='': extra+='height="'+str(size_y)+'" '
himage='<img src="'+purl+image+'" '+extra+'>'
h+=' '+himage
h+=' </div>\n'
h+=' </td>\n'
h+=' <td valign="top">\n'
x='width:99%;'
if size_y!='': x+='height:'+str(size_y)+'px;'
h+=' <div id="ck_entries">\n'
h+=' <b><small>Graph params (to customize/reproduce):</small></b>\n'
if problem_converting_json!='':
h+='<br><br><span style="color:red;"><i>'+problem_converting_json+'</i></span><br>\n'
h+=' <textarea name="graph_params" style="'+x+'">\n'
h+=jparams+'\n'
h+=' </textarea><br>\n'
h+=' </div>\n'
h+=' </td>\n'
h+=' </tr>\n'
h+='</table>\n'
# h+=' <hr class="ck_hr">\n'
if g.get('skip_control','')!='yes':
h+='<center>\n'
h+='<button type="submit" name="'+var_post_refresh_graph+'">Replot graph</button>\n'
h+='<button type="submit" name="'+var_post_reset_graph+'">Reset graph</button>\n'
checked=''
if ar=='on': checked=' checked '
h+=' Auto-replot graph: <input type="checkbox" name="'+var_post_autorefresh+'" id="'+var_post_autorefresh+'" onchange="submit()"'+checked+'>,'
h+=' seconds: <input type="text" name="'+var_post_autorefresh_time+'" value="'+str(iart)+'">\n'
h+='</center>\n'
if g.get('skip_reproduce','')!='yes':
# h+='<hr class="ck_hr">\n'
h+='<center>\n'
h+='<div id="ck_entries" style="background-color: #dfffbf;">\n'
h+='<b>Reproducing graph:</b>\n'
h+='<table width="99%">\n'
h+=' <tr>\n'
h+=' <td valign="top" align="left" width="44%">\n'
h+=' <table border="0" cellpadding="5">\n'
h+=' <tr>\n'
h+=' <td valign="top" width="140"><b>Experiment entries:</b></td>\n'
h+=' <td valign="top"><i>\n'
duoal=params.get('data_uoa_list','')
if len(duoal)>0:
h+='\n'
for q in duoal:
h+='<a href="'+burl+'wcid=experiment:'+q+'">'+q+'</a><br>\n'
h+='\n'
h+=' </i></td>\n'
h+=' </tr>\n'
if smuoa!='' and sduoa!='':
cid=smuoa+':'+sduoa
if sruoa!='': cid=sruoa+':'+cid
h+=' <tr>\n'
h+=' <td valign="top"><b>Scripts to rebuild:</b></td>\n'
h+=' <td valign="top"><i>\n'
h+=' ck find '+cid+'<br>\n'
h+=' <a href="'+burl+'wcid='+cid+'">View in CK viewer</a>\n'
h+=' </i></td>\n'
h+=' </tr>\n'
if output!='html':
h+=' <tr>\n'
h+=' <td valign="top"><b>Replay graph from CMD:</b></td>\n'
h+=' <td valign="top"><i>\n'
h+=' ck replay graph:'+duoa+' id='+gid+'\n'
h+=' </i></td>\n'
h+=' </tr>\n'
h+=' </table>\n'
h+=' </td>\n'
h+=' <td valign="top" align="left" width="56%">\n'
h+=' <table border="0" cellpadding="5">\n'
if gsr!='':
h+=' <tr>\n'
h+=' <td valign="top" width="250"><b>Obtain shared CK repo with all artifacts:</b></td>\n'
h+=' <td valign="top">\n'
h+=' <i>'+gsr+'</i>\n'
h+=' </td>\n'
h+=' </tr>\n'
refresh=False
if var_post_refresh_graph in ap: refresh=True
# if (pjson!='' or pcsv!='') and image_orig!='' and himage!='':
# if (pjson!='' or pcsv!='') and not refresh: # if refresh, table may change
x1=purl+gid+'.json'
x2=purl+gid+'.csv'
h+=' <tr>\n'
h+=' <td valign="top"><b>Original experiment table:</b></td>\n'
h+=' <td valign="top"><i>\n'
if pjson!='':
h+=' <a href="'+x1+'">Download in JSON</a>;  '
if pcsv!='':
h+=' <a href="'+x2+'">Download in CSV</a>\n'
h+=' </i></td>\n'
h+=' </tr>\n'
if image_orig!='' and himage!='':
h+=' <tr>\n'
             h+='        <td valign="top"><b>Embed original image into interactive report/paper:</b></td>\n'
h+=' <td valign="top"><i>\n'
             h+='         '+himage.replace('<','&lt;').replace('>','&gt;')+'\n'
h+=' </i></td>\n'
h+=' </tr>\n'
h+=' </table>\n'
h+=' </td>\n'
h+=' </tr>\n'
h+='</table>\n'
h+='</div>\n'
h+='</center>\n'
return {'return':0, 'raw':raw, 'show_top':top, 'html':h, 'style':st}
##############################################################################
# replaying saved graph from CMD
def replay(i):
"""
Input: {
              (repo_uoa)     - repo UOA of a saved graph
data_uoa - data UOA of a saved graph
(id) - subgraph id
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
duoa=i['data_uoa']
ruoa=i.get('repo_uoa','')
ii={'action':'load',
'module_uoa':work['self_module_uid'],
'data_uoa':duoa,
'repo_uoa':ruoa}
rx=ck.access(ii)
if rx['return']>0: return rx
dd=rx['dict']
graphs=dd.get('graphs',[])
if len(graphs)==0:
return {'return':1, 'error':'no saved graphs found'}
igraph=-1
if len(graphs)==1:
igraph=0
elif len(graphs)>1:
gid=i.get('id','')
if gid=='':
if o=='con':
             ck.out('Available subgraph IDs:')
for q in graphs:
ck.out(' '+q.get('id',''))
ck.out('')
return {'return':1, 'error':'more than one subgraph found - please, specify id'}
jgraph=0
for q in graphs:
if q.get('id','')==gid:
igraph=jgraph
break
jgraph+=1
if igraph==-1:
return {'return':1, 'error':'can\'t find subgraph'}
params=graphs[igraph].get('params',{})
# Replaying
params['action']='plot'
params['module_uoa']=work['self_module_uid']
return ck.access(params)
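##############################################################################
# Hedged usage sketch (not part of the original module): the same replay can
# also be invoked programmatically through the CK kernel API, mirroring the
# CMD form "ck replay graph:<UOA> id=<id>" shown above. The data UOA and
# subgraph id below are placeholders, not real entries.
def example_replay_via_kernel():
    import ck.kernel as ck_kernel              # standard CK python entry point
    r = ck_kernel.access({'action': 'replay',
                          'module_uoa': 'graph',          # this module
                          'data_uoa': 'my-saved-graph',   # placeholder UOA
                          'id': 'my-subgraph-id'})        # placeholder subgraph id
    if r['return'] > 0:
        print(r.get('error', ''))
    return r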
| bsd-3-clause |
mlyundin/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for LFW requires medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
jcarreiro/jmc-python | maps/plot.py | 1 | 2859 | # /usr/local/bin/python3
#
# Plot data over an OSM map.
import io
import pandas
import matplotlib.pyplot as plt
import numpy as np
import math
import urllib
from mpl_toolkits.basemap import Basemap
from PIL import Image
def deg2num(lat_deg, lon_deg, zoom):
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
return (xtile, ytile)
def num2deg(xtile, ytile, zoom):
"""
http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
This returns the NW-corner of the square.
Use the function with xtile+1 and/or ytile+1 to get the other corners.
With xtile+0.5 & ytile+0.5 it will return the center of the tile.
"""
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return (lat_deg, lon_deg)
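# Hedged usage sketch (not in the original file): round-trip a coordinate
# through the two tile helpers above, following the docstring's note about
# corners and centers. The latitude/longitude and zoom are arbitrary
# illustrative values.
def _tile_helpers_example(lat_deg=37.7749, lon_deg=-122.4194, zoom=12):
    xtile, ytile = deg2num(lat_deg, lon_deg, zoom)
    nw_corner = num2deg(xtile, ytile, zoom)             # NW corner of that tile
    center = num2deg(xtile + 0.5, ytile + 0.5, zoom)    # center of that tile
    return xtile, ytile, nw_corner, center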
def getImageCluster(lat_deg, lon_deg, delta_lat, delta_long, zoom):
smurl = r"http://a.tile.openstreetmap.org/{0}/{1}/{2}.png"
xmin, ymax = deg2num(lat_deg, lon_deg, zoom)
xmax, ymin = deg2num(lat_deg + delta_lat, lon_deg + delta_long, zoom)
bbox_ul = num2deg(xmin, ymin, zoom)
bbox_ll = num2deg(xmin, ymax + 1, zoom)
#print bbox_ul, bbox_ll
bbox_ur = num2deg(xmax + 1, ymin, zoom)
bbox_lr = num2deg(xmax + 1, ymax +1, zoom)
#print bbox_ur, bbox_lr
cluster = Image.new('RGB',((xmax-xmin+1)*256-1,(ymax-ymin+1)*256-1) )
for xtile in range(xmin, xmax+1):
for ytile in range(ymin, ymax+1):
try:
imgurl=smurl.format(zoom, xtile, ytile)
print("Opening: " + imgurl)
imgstr = urllib.request.urlopen(imgurl).read()
                tile = Image.open(io.BytesIO(imgstr))  # raw PNG bytes, so BytesIO rather than StringIO
cluster.paste(tile, box=((xtile-xmin)*255 , (ytile-ymin)*255))
            except Exception:
print("Couldn't download image")
tile = None
return cluster, [bbox_ll[1], bbox_ll[0], bbox_ur[1], bbox_ur[0]]
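# Hedged usage sketch (not in the original file): display a fetched tile
# cluster with matplotlib, mapping the returned bounding box
# [lon_min, lat_min, lon_max, lat_max] onto the image extent. The coordinates
# are arbitrary illustrative values, and calling this downloads tiles.
def _show_tiles_example():
    cluster, bbox = getImageCluster(37.75, -122.45, 0.05, 0.05, 13)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # imshow expects extent=(left, right, bottom, top)
    ax.imshow(np.asarray(cluster), extent=(bbox[0], bbox[2], bbox[1], bbox[3]))
    plt.show()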
def plot_map():
    df = pandas.read_csv('/Users/jcarreiro/Downloads/badid.csv', sep='\t', header=None)
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
bbox = df[3].min(), df[4].min(), df[3].max(), df[4].max()
m = Basemap(llcrnrlat=bbox[0],
llcrnrlon=bbox[1],
urcrnrlat=bbox[2],
urcrnrlon=bbox[3],
rsphere=(6378137.00, 6356752.3142),
resolution='h',
projection='merc',
lat_ts=20.0)
m.drawcoastlines()
m.fillcontinents()
m.drawparallels(np.arange(bbox[0], bbox[2], 0.1), labels=[1, 0, 0, 1])
m.drawmeridians(np.arange(bbox[1], bbox[3], 0.1), labels=[1, 0, 0, 1])
m.plot(df[4].values, df[3].values, latlon=True)
plt.show()
| mit |
michaelcapps/Titanic_ML | titanic_randForest.py | 1 | 1039 | #!/usr/bin/env python
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
# read in and set up training data
train = pd.read_csv("train.csv")
train["Sex"].replace('female',0,inplace=True)
train["Sex"].replace('male',1,inplace=True)
train.fillna(-1,inplace=True)
train_vec = train.loc[:,["Pclass","Sex","Age"]].values
labels = train["Survived"].values
# Fit random forest classifier (on class, sex, and age)
clf = RandomForestClassifier(max_depth=2, random_state=0)
clf.fit(train_vec, labels)
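# Optional sanity check (not in the original script): estimate accuracy with
# 5-fold cross-validation on the training features before predicting.
from sklearn.model_selection import cross_val_score
print("CV accuracy: %.3f" % cross_val_score(clf, train_vec, labels, cv=5).mean())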
# Read in and set up test data
test = pd.read_csv("test.csv")
test["Sex"].replace('female',0,inplace=True)
test["Sex"].replace('male',1,inplace=True)
test.fillna(-1,inplace=True)
test_vec = test.loc[:,["Pclass","Sex","Age"]].values
# Predict survival in test set
predict = [clf.predict(test_vec)]
# output to csv
test = pd.read_csv("test.csv")
submission = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": predict[0]})
submission.to_csv("submission.csv", index=False)
| mit |
JPFrancoia/scikit-learn | sklearn/tree/tree.py | 4 | 42704 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
# Nelson Liu <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many tree
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # unlike [:, np.newaxis] which does not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
if not 1 <= self.min_samples_leaf:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = self.min_samples_leaf
else: # float
if not 0. < self.min_samples_leaf <= 0.5:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
if not 2 <= self.min_samples_split:
raise ValueError("min_samples_split must be at least 2 "
"or in (0, 1], got %s"
% self.min_samples_split)
min_samples_split = self.min_samples_split
else: # float
if not 0. < self.min_samples_split <= 1.:
raise ValueError("min_samples_split must be at least 2 "
"or in (0, 1], got %s"
% self.min_samples_split)
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if sample_weight is None:
min_weight_leaf = (self.min_weight_fraction_leaf *
n_samples)
else:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than "
"or equal to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes,
self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
.. versionadded:: 0.18
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
    valid partition of the node samples is found, even if it requires
    effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
    valid partition of the node samples is found, even if it requires
    effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. If the impurity
of a node is below the threshold, the node is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_split=min_impurity_split,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_split=1e-7,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=random_state)
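# Hedged usage sketch (not part of scikit-learn): as the docstrings above note,
# extra-trees are meant to be used inside ensemble methods, e.g. via bagging.
# Imports are kept local to avoid a circular import with sklearn.ensemble at
# module load time; the dataset and hyper-parameters are illustrative only.
def _extra_tree_ensemble_example():
    from sklearn.datasets import load_boston
    from sklearn.ensemble import BaggingRegressor
    boston = load_boston()
    model = BaggingRegressor(ExtraTreeRegressor(random_state=0),
                             n_estimators=10, random_state=0)
    return model.fit(boston.data, boston.target)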
| bsd-3-clause |
raymondxyang/tensorflow | tensorflow/python/estimator/canned/dnn_test.py | 20 | 16058 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
class DNNRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
class DNNRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
| apache-2.0 |
dvro/brew | brew/preprocessing/smote.py | 3 | 1807 | from __future__ import division
import numpy as np
from sklearn.neighbors import NearestNeighbors
def smote(T, N=100, k=1):
"""
T: minority class data
N: percentage of oversampling
k: number of neighbors used
"""
    # Modification of the original SMOTE code so that it won't break if the
    # minority class is too small relative to k; this heuristic may not be
    # sensible in all cases.
if T.shape[0] <= k + 1:
idx = np.random.choice(T.shape[0], size=(k + 1,))
T = T[idx, :]
    # randomly select a subset of the data, to be used for creating synthetic
    # samples
if N < 100:
sz = int(T.shape[0] * (N / 100))
idx = np.random.choice(T.shape[0], size=(sz,), replace=False)
T = T[idx, :]
N = 100
if N % 100 != 0:
raise ValueError('N must be < 100 OR multiple of 100')
N = int(N / 100)
n_minority_samples, n_features = T.shape
n_synthetic_samples = N * n_minority_samples
synthetic = np.zeros((n_synthetic_samples, n_features))
knn = NearestNeighbors(n_neighbors=k)
knn.fit(T)
count = 0
for i in range(n_minority_samples):
        # the first neighbor returned is always the sample itself, so ask for
        # one extra neighbor and discard the first one returned
neighbors_idx = knn.kneighbors(
T[i, :].reshape(1,-1), n_neighbors=k + 1,
return_distance=False)[0][1:]
# randomly choose N neighbors of the sample (with replacement)
nn_idx = np.random.choice(neighbors_idx, size=(N,))
chosen_neighbors = T[nn_idx, :]
diff = chosen_neighbors - T[i, :]
gap = np.random.uniform(low=0.0, high=1.0, size=N)[:, np.newaxis]
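        # each synthetic sample lies on the segment between T[i] and the chosen
        # neighbor: x_new = x_i + gap * (x_neighbor - x_i), with gap in [0, 1)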
synthetic[count:count + N, :] = T[i, :] + (gap * diff)
count += N
return synthetic
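# Illustrative usage sketch (not part of the original module); the toy data
# below is made up for demonstration:
#
#     minority = np.random.randn(20, 2)        # 20 minority-class samples
#     synthetic = smote(minority, N=200, k=3)  # 200% oversampling
#     # synthetic.shape == (40, 2)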
| mit |
jimgoo/zipline-fork | tests/test_sources.py | 10 | 7209 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytz
from six import integer_types
from unittest import TestCase
import zipline.utils.factory as factory
from zipline.sources import (DataFrameSource,
DataPanelSource,
RandomWalkSource)
from zipline.utils import tradingcalendar as calendar_nyse
from zipline.assets import AssetFinder
from zipline.finance.trading import TradingEnvironment
class TestDataFrameSource(TestCase):
def test_df_source(self):
source, df = factory.create_test_df_source(env=None)
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for expected_dt, expected_price in df.iterrows():
sid0 = next(source)
assert expected_dt == sid0.dt
assert expected_price[0] == sid0.price
def test_df_sid_filtering(self):
_, df = factory.create_test_df_source(env=None)
source = DataFrameSource(df)
assert 1 not in [event.sid for event in source], \
"DataFrameSource should only stream selected sid 0, not sid 1."
def test_panel_source(self):
source, panel = factory.create_test_panel_source(source_type=5)
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertTrue('sid' in event)
self.assertTrue('arbitrary' in event)
self.assertTrue('type' in event)
self.assertTrue(hasattr(event, 'volume'))
self.assertTrue(hasattr(event, 'price'))
self.assertEquals(event['type'], 5)
self.assertEquals(event['arbitrary'], 1.)
self.assertEquals(event['sid'], 0)
self.assertTrue(isinstance(event['volume'], int))
self.assertTrue(isinstance(event['arbitrary'], float))
def test_yahoo_bars_to_panel_source(self):
env = TradingEnvironment()
finder = AssetFinder(env.engine)
stocks = ['AAPL', 'GE']
env.write_data(equities_identifiers=stocks)
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = factory.load_bars_from_yahoo(stocks=stocks,
indexes={},
start=start,
end=end)
check_fields = ['sid', 'open', 'high', 'low', 'close',
'volume', 'price']
copy_panel = data.copy()
sids = finder.map_identifier_index_to_sids(
data.items, data.major_axis[0]
)
copy_panel.items = sids
source = DataPanelSource(copy_panel)
for event in source:
for check_field in check_fields:
self.assertIn(check_field, event)
self.assertTrue(isinstance(event['volume'], (integer_types)))
self.assertTrue(event['sid'] in sids)
def test_nan_filter_dataframe(self):
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.DataFrame(np.random.randn(2, 2),
index=dates,
columns=[4, 5])
# should be filtered
df.loc[dates[0], 4] = np.nan
# should not be filtered, should have been ffilled
df.loc[dates[1], 5] = np.nan
source = DataFrameSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
def test_nan_filter_panel(self):
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.Panel(np.random.randn(2, 2, 2),
major_axis=dates,
items=[4, 5],
minor_axis=['price', 'volume'])
# should be filtered
df.loc[4, dates[0], 'price'] = np.nan
# should not be filtered, should have been ffilled
df.loc[5, dates[1], 'price'] = np.nan
source = DataPanelSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
class TestRandomWalkSource(TestCase):
def test_minute(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1991-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end)
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertTrue(13 <= event.dt.hour <= 21,
"event.dt.hour == %i, not during market \
hours." % event.dt.hour)
def test_day(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1992-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end, freq='daily')
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertEqual(event.dt.hour, 0)
| apache-2.0 |
matk86/pymatgen | pymatgen/io/gaussian.py | 3 | 53473 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import re
import numpy as np
import warnings
from pymatgen.core.operations import SymmOp
from pymatgen import Element, Molecule, Composition
from monty.io import zopen
from pymatgen.util.coord import get_angle
import scipy.constants as cst
from pymatgen.electronic_structure.core import Spin
"""
This module implements input and output processing from Gaussian.
"""
__author__ = 'Shyue Ping Ong, Germain Salvato-Vallverdu, Xin Chen'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = '[email protected]'
__date__ = '8/1/15'
float_patt = re.compile(r"\s*([+-]?\d+\.\d+)")
HARTREE_TO_ELECTRON_VOLT = 1/cst.physical_constants["electron volt-hartree relationship"][0]
def read_route_line(route):
"""
    Read the route line of a Gaussian input/output file and return the
    functional, the basis set and a dictionary of the other route parameters.
    Args:
        route (str) : the route line
    Returns:
        functional (str) : the method (HF, PBE ...)
        basis_set (str) : the basis set
        route_params (dict) : dictionary of route parameters
        dieze_tag (str) : the "#" tag preceding the route (e.g. "#P")
"""
scrf_patt = re.compile(r"^([sS][cC][rR][fF])\s*=\s*(.+)")
functional = None
basis_set = None
route_params = {}
dieze_tag = None
if route:
if "/" in route:
tok = route.split("/")
functional = tok[0].split()[-1]
basis_set = tok[1].split()[0]
for tok in [functional, basis_set, "/"]:
route = route.replace(tok, "")
for tok in route.split():
if scrf_patt.match(tok):
m = scrf_patt.match(tok)
route_params[m.group(1)] = m.group(2)
elif "#" in tok:
# does not store # in route to avoid error in input
dieze_tag = tok
continue
else:
d = tok.split("=")
v = None if len(d) == 1 else d[1]
route_params[d[0]] = v
return functional, basis_set, route_params, dieze_tag
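# Illustrative sketch (not part of the original module): expected parsing of a
# typical route line by the function above.
#
#     func, bset, params, dieze = read_route_line("#P B3LYP/6-31G(d) Opt SCF=Tight")
#     # func == 'B3LYP', bset == '6-31G(d)', dieze == '#P'
#     # params == {'Opt': None, 'SCF': 'Tight'}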
class GaussianInput(object):
"""
An object representing a Gaussian input file.
Args:
mol: Input molecule. If molecule is a single string, it is used as a
direct input to the geometry section of the Gaussian input
file.
charge: Charge of the molecule. If None, charge on molecule is used.
            Defaults to None. This allows the input file to be given a
            charge independently of the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
title: Title for run. Defaults to formula of molecule if None.
functional: Functional for run.
basis_set: Basis set for run.
route_parameters: Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
input_parameters: Additional input parameters for run as a dict. Used
for example, in PCM calculations. E.g., {"EPS":12}
link0_parameters: Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
dieze_tag: # preceding the route line. E.g. "#p"
gen_basis: allows a user-specified basis set to be used in a Gaussian
calculation. If this is not None, the attribute ``basis_set`` will
be set to "Gen".
"""
# Commonly used regex patterns
zmat_patt = re.compile(r"^(\w+)*([\s,]+(\w+)[\s,]+(\w+))*[\-\.\s,\w]*$")
xyz_patt = re.compile(r"^(\w+)[\s,]+([\d\.eE\-]+)[\s,]+([\d\.eE\-]+)[\s,]+"
r"([\d\.eE\-]+)[\-\.\s,\w.]*$")
def __init__(self, mol, charge=None, spin_multiplicity=None, title=None,
functional="HF", basis_set="6-31G(d)", route_parameters=None,
input_parameters=None, link0_parameters=None, dieze_tag="#P",
gen_basis=None):
self._mol = mol
self.charge = charge if charge is not None else mol.charge
nelectrons = - self.charge + mol.charge + mol.nelectrons
if spin_multiplicity is not None:
self.spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(
self.charge, spin_multiplicity))
else:
self.spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
self.functional = functional
self.basis_set = basis_set
self.link0_parameters = link0_parameters if link0_parameters else {}
self.route_parameters = route_parameters if route_parameters else {}
self.input_parameters = input_parameters if input_parameters else {}
self.title = title if title else self._mol.composition.formula
self.dieze_tag = dieze_tag if dieze_tag[0] == "#" else "#P"
self.gen_basis = gen_basis
if gen_basis is not None:
self.basis_set = "Gen"
@property
def molecule(self):
"""
Returns molecule associated with this GaussianInput.
"""
return self._mol
@staticmethod
def parse_coords(coord_lines):
"""
Helper method to parse coordinates.
"""
paras = {}
var_pattern = re.compile(r"^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
for l in coord_lines:
m = var_pattern.match(l.strip())
if m:
paras[m.group(1)] = float(m.group(2))
species = []
coords = []
# Stores whether a Zmatrix format is detected. Once a zmatrix format
        # is detected, it is assumed for the remainder of the parsing.
zmode = False
for l in coord_lines:
l = l.strip()
if not l:
break
if (not zmode) and GaussianInput.xyz_patt.match(l):
m = GaussianInput.xyz_patt.match(l)
species.append(m.group(1))
toks = re.split(r"[,\s]+", l.strip())
if len(toks) > 4:
coords.append([float(i) for i in toks[2:5]])
else:
coords.append([float(i) for i in toks[1:4]])
elif GaussianInput.zmat_patt.match(l):
zmode = True
toks = re.split(r"[,\s]+", l.strip())
species.append(toks[0])
toks.pop(0)
if len(toks) == 0:
coords.append(np.array([0, 0, 0]))
else:
nn = []
parameters = []
while len(toks) > 1:
ind = toks.pop(0)
data = toks.pop(0)
try:
nn.append(int(ind))
except ValueError:
nn.append(species.index(ind) + 1)
try:
val = float(data)
parameters.append(val)
except ValueError:
if data.startswith("-"):
parameters.append(-paras[data[1:]])
else:
parameters.append(paras[data])
if len(nn) == 1:
coords.append(np.array([0, 0, parameters[0]]))
elif len(nn) == 2:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
bl = parameters[0]
angle = parameters[1]
axis = [0, 1, 0]
op = SymmOp.from_origin_axis_angle(coords1, axis,
angle, False)
coord = op.operate(coords2)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
elif len(nn) == 3:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
coords3 = coords[nn[2] - 1]
bl = parameters[0]
angle = parameters[1]
dih = parameters[2]
v1 = coords3 - coords2
v2 = coords1 - coords2
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(
coords1, axis, angle, False)
coord = op.operate(coords2)
v1 = coord - coords1
v2 = coords1 - coords2
v3 = np.cross(v1, v2)
adj = get_angle(v3, axis)
axis = coords1 - coords2
op = SymmOp.from_origin_axis_angle(
coords1, axis, dih - adj, False)
coord = op.operate(coord)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
def parse_species(sp_str):
"""
The species specification can take many forms. E.g.,
simple integers representing atomic numbers ("8"),
actual species string ("C") or a labelled species ("C1").
Sometimes, the species string is also not properly capitalized,
e.g, ("c1"). This method should take care of these known formats.
"""
try:
return int(sp_str)
except ValueError:
sp = re.sub(r"\d", "", sp_str)
return sp.capitalize()
species = [parse_species(sp) for sp in species]
return Molecule(species, coords)
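    # Illustrative sketch (not part of the original file): parse_coords accepts
    # plain cartesian lines (as well as Z-matrix input); the CO geometry below
    # is made up for demonstration.
    #
    #     mol = GaussianInput.parse_coords(["C 0.000 0.000 0.000",
    #                                       "O 0.000 0.000 1.128"])
    #     # -> Molecule with species ["C", "O"]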
@staticmethod
def from_string(contents):
"""
Creates GaussianInput from a string.
Args:
contents: String representing an Gaussian input file.
Returns:
GaussianInput object
"""
lines = [l.strip() for l in contents.split("\n")]
link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)")
link0_dict = {}
for i, l in enumerate(lines):
if link0_patt.match(l):
m = link0_patt.match(l)
link0_dict[m.group(1)] = m.group(2)
route_patt = re.compile(r"^#[sSpPnN]*.*")
route = None
for i, l in enumerate(lines):
if route_patt.match(l):
route = l
route_index = i
break
functional, basis_set, route_paras, dieze_tag = read_route_line(route)
ind = 2
title = []
while lines[route_index + ind].strip():
title.append(lines[route_index + ind].strip())
ind += 1
title = ' '.join(title)
ind += 1
toks = re.split(r"[\s,]", lines[route_index + ind])
charge = int(toks[0])
spin_mult = int(toks[1])
coord_lines = []
spaces = 0
input_paras = {}
ind += 1
for i in range(route_index + ind, len(lines)):
if lines[i].strip() == "":
spaces += 1
if spaces >= 2:
d = lines[i].split("=")
if len(d) == 2:
input_paras[d[0]] = d[1]
else:
coord_lines.append(lines[i].strip())
mol = GaussianInput.parse_coords(coord_lines)
mol.set_charge_and_spin(charge, spin_mult)
return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult,
title=title, functional=functional,
basis_set=basis_set, route_parameters=route_paras,
                             input_parameters=input_paras, link0_parameters=link0_dict,
dieze_tag=dieze_tag)
@staticmethod
def from_file(filename):
"""
Creates GaussianInput from a file.
Args:
filename: Gaussian input filename
Returns:
GaussianInput object
"""
with zopen(filename, "r") as f:
return GaussianInput.from_string(f.read())
def _find_nn_pos_before_site(self, siteindex):
"""
        Returns indices of the nearest neighbor atoms, sorted by distance.
"""
alldist = [(self._mol.get_distance(siteindex, i), i)
for i in range(siteindex)]
alldist = sorted(alldist, key=lambda x: x[0])
return [d[1] for d in alldist]
def get_zmatrix(self):
"""
Returns a z-matrix representation of the molecule.
"""
output = []
outputvar = []
for i, site in enumerate(self._mol):
if i == 0:
output.append("{}".format(site.specie))
elif i == 1:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
output.append("{} {} B{}".format(self._mol[i].specie,
nn[0] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
elif i == 2:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
output.append("{} {} B{} {} A{}".format(self._mol[i].specie,
nn[0] + 1, i,
nn[1] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
outputvar.append("A{}={:.6f}".format(i, angle))
else:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
dih = self._mol.get_dihedral(i, nn[0], nn[1], nn[2])
output.append("{} {} B{} {} A{} {} D{}"
.format(self._mol[i].specie, nn[0] + 1, i,
nn[1] + 1, i, nn[2] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
outputvar.append("A{}={:.6f}".format(i, angle))
outputvar.append("D{}={:.6f}".format(i, dih))
return "\n".join(output) + "\n\n" + "\n".join(outputvar)
def get_cart_coords(self):
"""
Return the cartesian coordinates of the molecule
"""
outs = []
to_s = lambda x: "%0.6f" % x
for i, site in enumerate(self._mol):
outs.append(" ".join([site.species_string,
" ".join([to_s(j) for j in site.coords])]))
return "\n".join(outs)
def __str__(self):
return self.to_string()
def to_string(self, cart_coords=False):
"""
Return GaussianInput string
        Option: when cart_coords is set to True, return the cartesian
        coordinates instead of the z-matrix
"""
def para_dict_to_string(para, joiner=" "):
para_str = ["{}={}".format(k, v) if v else k
for k, v in sorted(para.items())]
return joiner.join(para_str)
output = []
if self.link0_parameters:
output.append(para_dict_to_string(self.link0_parameters, "\n"))
output.append("{diez} {func}/{bset} {route}"
.format(diez=self.dieze_tag, func=self.functional,
bset=self.basis_set,
route=para_dict_to_string(self.route_parameters))
)
output.append("")
output.append(self.title)
output.append("")
output.append("{} {}".format(self.charge, self.spin_multiplicity))
if isinstance(self._mol, Molecule):
if cart_coords is True:
output.append(self.get_cart_coords())
else:
output.append(self.get_zmatrix())
else:
output.append(str(self._mol))
output.append("")
if self.gen_basis is not None:
output.append("{:s}\n".format(self.gen_basis))
output.append(para_dict_to_string(self.input_parameters, "\n"))
output.append("\n")
return "\n".join(output)
    def write_file(self, filename, cart_coords=False):
"""
Write the input string into a file
Option: see __str__ method
"""
with zopen(filename, "w") as f:
f.write(self.to_string(cart_coords))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"functional": self.functional,
"basis_set": self.basis_set,
"route_parameters": self.route_parameters,
"title": self.title,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"input_parameters": self.input_parameters,
"link0_parameters": self.link0_parameters,
"dieze_tag": self.dieze_tag}
@classmethod
def from_dict(cls, d):
return GaussianInput(mol=Molecule.from_dict(d["molecule"]),
functional=d["functional"],
basis_set=d["basis_set"],
route_parameters=d["route_parameters"],
title=d["title"],
charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
input_parameters=d["input_parameters"],
link0_parameters=d["link0_parameters"])
class GaussianOutput(object):
"""
Parser for Gaussian output files.
Args:
filename: Filename of Gaussian output file.
.. note::
Still in early beta.
Attributes:
.. attribute:: structures
All structures from the calculation.
.. attribute:: energies
All energies from the calculation.
.. attribute:: eigenvalues
List of eigenvalues for the last geometry
.. attribute:: MO_coefficients
Matrix of MO coefficients for the last geometry
.. attribute:: cart_forces
All cartesian forces from the calculation.
.. attribute:: frequencies
A list for each freq calculation and for each mode of a dict with
{
"frequency": freq in cm-1,
"symmetry": symmetry tag
"r_mass": Reduce mass,
"f_constant": force constant,
"IR_intensity": IR Intensity,
"mode": normal mode
}
        The normal mode is a 1D vector of dx, dy, dz for each atom.
.. attribute:: hessian
Matrix of second derivatives of the energy with respect to cartesian
coordinates in the **input orientation** frame. Need #P in the
route section in order to be in the output.
.. attribute:: properly_terminated
True if run has properly terminated
.. attribute:: is_pcm
True if run is a PCM run.
.. attribute:: is_spin
True if it is an unrestricted run
.. attribute:: stationary_type
If it is a relaxation run, indicates whether it is a minimum (Minimum)
or a saddle point ("Saddle").
.. attribute:: corrections
Thermochemical corrections if this run is a Freq run as a dict. Keys
are "Zero-point", "Thermal", "Enthalpy" and "Gibbs Free Energy"
.. attribute:: functional
Functional used in the run.
.. attribute:: basis_set
Basis set used in the run
.. attribute:: route
Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
.. attribute:: dieze_tag
# preceding the route line, e.g. "#P"
.. attribute:: link0
Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
.. attribute:: charge
Charge for structure
.. attribute:: spin_mult
Spin multiplicity for structure
.. attribute:: num_basis_func
Number of basis functions in the run.
.. attribute:: electrons
number of alpha and beta electrons as (N alpha, N beta)
.. attribute:: pcm
PCM parameters and output if available.
.. attribute:: errors
error if not properly terminated (list to be completed in error_defs)
.. attribute:: Mulliken_charges
Mulliken atomic charges
.. attribute:: eigenvectors
        Matrix of shape (num_basis_func, num_basis_func). Each column is an
        eigenvector and contains the AO coefficients of an MO.
eigenvectors[Spin] = mat(num_basis_func, num_basis_func)
.. attribute:: molecular_orbital
MO development coefficients on AO in a more convenient array dict
for each atom and basis set label.
mo[Spin][OM j][atom i] = {AO_k: coeff, AO_k: coeff ... }
.. attribute:: atom_basis_labels
        Labels of AO for each atom. These labels are those used in the output
of molecular orbital coefficients (POP=Full) and in the molecular_orbital
array dict.
atom_basis_labels[iatom] = [AO_k, AO_k, ...]
.. attribute:: resumes
        List of Gaussian data summaries ("resumes") given at the end of the
        output file before the quotation. The resumes are given as strings.
Methods:
.. method:: to_input()
Return a GaussianInput object using the last geometry and the same
calculation parameters.
.. method:: read_scan()
Read a potential energy surface from a gaussian scan calculation.
.. method:: get_scan_plot()
Get a matplotlib plot of the potential energy surface
.. method:: save_scan_plot()
Save a matplotlib plot of the potential energy surface to a file
"""
def __init__(self, filename):
self.filename = filename
self._parse(filename)
@property
def final_energy(self):
return self.energies[-1]
@property
def final_structure(self):
return self.structures[-1]
def _parse(self, filename):
start_patt = re.compile(r" \(Enter \S+l101\.exe\)")
route_patt = re.compile(r" #[pPnNtT]*.*")
link0_patt = re.compile(r"^\s(%.+)\s*=\s*(.+)")
charge_mul_patt = re.compile(r"Charge\s+=\s*([-\d]+)\s+"
r"Multiplicity\s+=\s*(\d+)")
num_basis_func_patt = re.compile(r"([0-9]+)\s+basis functions")
num_elec_patt = re.compile(r"(\d+)\s+alpha electrons\s+(\d+)\s+beta electrons")
pcm_patt = re.compile(r"Polarizable Continuum Model")
stat_type_patt = re.compile(r"imaginary frequencies")
scf_patt = re.compile(r"E\(.*\)\s*=\s*([-\.\d]+)\s+")
mp2_patt = re.compile(r"EUMP2\s*=\s*(.*)")
oniom_patt = re.compile(r"ONIOM:\s+extrapolated energy\s*=\s*(.*)")
termination_patt = re.compile(r"(Normal|Error) termination")
error_patt = re.compile(
r"(! Non-Optimized Parameters !|Convergence failure)")
mulliken_patt = re.compile(
r"^\s*(Mulliken charges|Mulliken atomic charges)")
mulliken_charge_patt = re.compile(
r'^\s+(\d+)\s+([A-Z][a-z]?)\s*(\S*)')
end_mulliken_patt = re.compile(
r'(Sum of Mulliken )(.*)(charges)\s*=\s*(\D)')
std_orientation_patt = re.compile(r"Standard orientation")
end_patt = re.compile(r"--+")
orbital_patt = re.compile(r"(Alpha|Beta)\s*\S+\s*eigenvalues --(.*)")
thermo_patt = re.compile(r"(Zero-point|Thermal) correction(.*)="
r"\s+([\d\.-]+)")
forces_on_patt = re.compile(
r"Center\s+Atomic\s+Forces\s+\(Hartrees/Bohr\)")
forces_off_patt = re.compile(r"Cartesian\s+Forces:\s+Max.*RMS.*")
forces_patt = re.compile(
r"\s+(\d+)\s+(\d+)\s+([0-9\.-]+)\s+([0-9\.-]+)\s+([0-9\.-]+)")
freq_on_patt = re.compile(
r"Harmonic\sfrequencies\s+\(cm\*\*-1\),\sIR\sintensities.*Raman.*")
freq_patt = re.compile(r"Frequencies\s--\s+(.*)")
normal_mode_patt = re.compile(
r"\s+(\d+)\s+(\d+)\s+([0-9\.-]{4,5})\s+([0-9\.-]{4,5}).*")
mo_coeff_patt = re.compile(r"Molecular Orbital Coefficients:")
mo_coeff_name_patt = re.compile(r"\d+\s((\d+|\s+)\s+([a-zA-Z]{1,2}|\s+))\s+(\d+\S+)")
hessian_patt = re.compile(r"Force constants in Cartesian coordinates:")
resume_patt = re.compile(r"^\s1\\1\\GINC-\S*")
resume_end_patt = re.compile(r"^\s.*\\\\@")
self.properly_terminated = False
self.is_pcm = False
self.stationary_type = "Minimum"
self.structures = []
self.corrections = {}
self.energies = []
self.pcm = None
self.errors = []
self.Mulliken_charges = {}
self.link0 = {}
self.cart_forces = []
self.frequencies = []
self.eigenvalues = []
self.is_spin = False
self.hessian = None
self.resumes = []
coord_txt = []
read_coord = 0
read_mulliken = False
read_eigen = False
eigen_txt = []
parse_stage = 0
num_basis_found = False
terminated = False
parse_forces = False
forces = []
parse_freq = False
frequencies = []
read_mo = False
parse_hessian = False
with zopen(filename) as f:
for line in f:
if parse_stage == 0:
if start_patt.search(line):
parse_stage = 1
elif link0_patt.match(line):
m = link0_patt.match(line)
self.link0[m.group(1)] = m.group(2)
elif route_patt.search(line):
params = read_route_line(line)
self.functional = params[0]
self.basis_set = params[1]
self.route = params[2]
route_lower = {k.lower(): v for k, v in self.route.items()}
self.dieze_tag = params[3]
parse_stage = 1
elif parse_stage == 1:
if charge_mul_patt.search(line):
m = charge_mul_patt.search(line)
self.charge = int(m.group(1))
self.spin_mult = int(m.group(2))
parse_stage = 2
elif parse_stage == 2:
if self.is_pcm:
self._check_pcm(line)
if "freq" in route_lower and thermo_patt.search(line):
m = thermo_patt.search(line)
if m.group(1) == "Zero-point":
self.corrections["Zero-point"] = float(m.group(3))
else:
key = m.group(2).strip(" to ")
self.corrections[key] = float(m.group(3))
if read_coord:
if not end_patt.search(line):
coord_txt.append(line)
else:
read_coord = (read_coord + 1) % 4
if not read_coord:
sp = []
coords = []
for l in coord_txt[2:]:
toks = l.split()
sp.append(Element.from_Z(int(toks[1])))
coords.append([float(i) for i in toks[3:6]])
self.structures.append(Molecule(sp, coords))
if parse_forces:
m = forces_patt.search(line)
if m:
forces.extend([float(_v) for _v in m.groups()[2:5]])
elif forces_off_patt.search(line):
self.cart_forces.append(forces)
forces = []
parse_forces = False
# read molecular orbital eigenvalues
if read_eigen:
m = orbital_patt.search(line)
if m:
eigen_txt.append(line)
else:
read_eigen = False
self.eigenvalues = {Spin.up: []}
for eigenline in eigen_txt:
if "Alpha" in eigenline:
self.eigenvalues[Spin.up] += [float(e)
for e in float_patt.findall(eigenline)]
elif "Beta" in eigenline:
if Spin.down not in self.eigenvalues:
self.eigenvalues[Spin.down] = []
self.eigenvalues[Spin.down] += [float(e)
for e in float_patt.findall(eigenline)]
eigen_txt = []
# read molecular orbital coefficients
if read_mo:
# build a matrix with all coefficients
all_spin = [Spin.up]
if self.is_spin:
all_spin.append(Spin.down)
mat_mo = {}
for spin in all_spin:
mat_mo[spin] = np.zeros((self.num_basis_func, self.num_basis_func))
nMO = 0
end_mo = False
while nMO < self.num_basis_func and not end_mo:
f.readline()
f.readline()
self.atom_basis_labels = []
for i in range(self.num_basis_func):
line = f.readline()
# identify atom and OA labels
m = mo_coeff_name_patt.search(line)
if m.group(1).strip() != "":
iat = int(m.group(2)) - 1
# atname = m.group(3)
self.atom_basis_labels.append([m.group(4)])
else:
self.atom_basis_labels[iat].append(m.group(4))
# MO coefficients
coeffs = [float(c) for c in float_patt.findall(line)]
for j in range(len(coeffs)):
mat_mo[spin][i, nMO + j] = coeffs[j]
nMO += len(coeffs)
line = f.readline()
# manage pop=regular case (not all MO)
if nMO < self.num_basis_func and \
("Density Matrix:" in line or mo_coeff_patt.search(line)):
end_mo = True
warnings.warn("POP=regular case, matrix coefficients not complete")
f.readline()
self.eigenvectors = mat_mo
read_mo = False
# build a more convenient array dict with MO coefficient of
# each atom in each MO.
# mo[Spin][OM j][atom i] = {AO_k: coeff, AO_k: coeff ... }
mo = {}
for spin in all_spin:
mo[spin] = [[{} for iat in range(len(self.atom_basis_labels))]
for j in range(self.num_basis_func)]
for j in range(self.num_basis_func):
i = 0
for iat in range(len(self.atom_basis_labels)):
for label in self.atom_basis_labels[iat]:
mo[spin][j][iat][label] = self.eigenvectors[spin][i, j]
i += 1
self.molecular_orbital = mo
elif parse_freq:
while line.strip() != "": # blank line
ifreqs = [int(val) - 1 for val in line.split()]
for ifreq in ifreqs:
frequencies.append({"frequency": None,
"r_mass": None,
"f_constant": None,
"IR_intensity": None,
"symmetry": None,
"mode": []})
# read freq, intensity, masses, symmetry ...
while "Atom AN" not in line:
if "Frequencies --" in line:
freqs = map(float, float_patt.findall(line))
for ifreq, freq in zip(ifreqs, freqs):
frequencies[ifreq]["frequency"] = freq
elif "Red. masses --" in line:
r_masses = map(float, float_patt.findall(line))
for ifreq, r_mass in zip(ifreqs, r_masses):
frequencies[ifreq]["r_mass"] = r_mass
elif "Frc consts --" in line:
f_consts = map(float, float_patt.findall(line))
for ifreq, f_const in zip(ifreqs, f_consts):
frequencies[ifreq]["f_constant"] = f_const
elif "IR Inten --" in line:
IR_intens = map(float, float_patt.findall(line))
for ifreq, intens in zip(ifreqs, IR_intens):
frequencies[ifreq]["IR_intensity"] = intens
else:
syms = line.split()[:3]
for ifreq, sym in zip(ifreqs, syms):
frequencies[ifreq]["symmetry"] = sym
line = f.readline()
# read normal modes
line = f.readline()
while normal_mode_patt.search(line):
values = list(map(float, float_patt.findall(line)))
for i, ifreq in zip(range(0, len(values), 3), ifreqs):
frequencies[ifreq]["mode"].extend(values[i:i+3])
line = f.readline()
parse_freq = False
self.frequencies.append(frequencies)
frequencies = []
elif parse_hessian:
# read Hessian matrix under "Force constants in Cartesian coordinates"
# Hessian matrix is in the input orientation framework
# WARNING : need #P in the route line
parse_hessian = False
ndf = 3 * len(self.structures[0])
self.hessian = np.zeros((ndf, ndf))
j_indices = range(5)
jndf = 0
while jndf < ndf:
for i in range(jndf, ndf):
line = f.readline()
vals = re.findall(r"\s*([+-]?\d+\.\d+[eEdD]?[+-]\d+)", line)
vals = [float(val.replace("D", "E")) for val in vals]
for jval, val in enumerate(vals):
j = j_indices[jval]
self.hessian[i, j] = val
self.hessian[j, i] = val
jndf += len(vals)
line = f.readline()
j_indices = [j + 5 for j in j_indices]
elif termination_patt.search(line):
m = termination_patt.search(line)
if m.group(1) == "Normal":
self.properly_terminated = True
terminated = True
elif error_patt.search(line):
error_defs = {
"! Non-Optimized Parameters !": "Optimization "
"error",
"Convergence failure": "SCF convergence error"
}
m = error_patt.search(line)
self.errors.append(error_defs[m.group(1)])
elif (not num_basis_found) and \
num_basis_func_patt.search(line):
m = num_basis_func_patt.search(line)
self.num_basis_func = int(m.group(1))
num_basis_found = True
elif num_elec_patt.search(line):
m = num_elec_patt.search(line)
self.electrons = (int(m.group(1)), int(m.group(2)))
elif (not self.is_pcm) and pcm_patt.search(line):
self.is_pcm = True
self.pcm = {}
elif "freq" in route_lower and "opt" in route_lower and \
stat_type_patt.search(line):
self.stationary_type = "Saddle"
elif mp2_patt.search(line):
m = mp2_patt.search(line)
self.energies.append(float(m.group(1).replace("D",
"E")))
elif oniom_patt.search(line):
                        m = oniom_patt.search(line)
self.energies.append(float(m.group(1)))
elif scf_patt.search(line):
m = scf_patt.search(line)
self.energies.append(float(m.group(1)))
elif std_orientation_patt.search(line):
coord_txt = []
read_coord = 1
elif not read_eigen and orbital_patt.search(line):
eigen_txt.append(line)
read_eigen = True
elif mulliken_patt.search(line):
mulliken_txt = []
read_mulliken = True
elif not parse_forces and forces_on_patt.search(line):
parse_forces = True
elif freq_on_patt.search(line):
parse_freq = True
[f.readline() for i in range(3)]
elif mo_coeff_patt.search(line):
if "Alpha" in line:
self.is_spin = True
read_mo = True
elif hessian_patt.search(line):
parse_hessian = True
elif resume_patt.search(line):
resume = []
while not resume_end_patt.search(line):
resume.append(line)
line = f.readline()
if line == "\n": # security if \\@ not in one line !
break
resume.append(line)
resume = "".join([r.strip() for r in resume])
self.resumes.append(resume)
if read_mulliken:
if not end_mulliken_patt.search(line):
mulliken_txt.append(line)
else:
m = end_mulliken_patt.search(line)
mulliken_charges = {}
for line in mulliken_txt:
if mulliken_charge_patt.search(line):
m = mulliken_charge_patt.search(line)
                                mul_dict = {int(m.group(1)): [m.group(2), float(m.group(3))]}
                                mulliken_charges.update(mul_dict)
read_mulliken = False
self.Mulliken_charges = mulliken_charges
if not terminated:
#raise IOError("Bad Gaussian output file.")
warnings.warn("\n" + self.filename + \
": Termination error or bad Gaussian output file !")
def _check_pcm(self, line):
energy_patt = re.compile(r"(Dispersion|Cavitation|Repulsion) energy"
r"\s+\S+\s+=\s+(\S*)")
total_patt = re.compile(r"with all non electrostatic terms\s+\S+\s+"
r"=\s+(\S*)")
parameter_patt = re.compile(r"(Eps|Numeral density|RSolv|Eps"
r"\(inf[inity]*\))\s+=\s*(\S*)")
if energy_patt.search(line):
m = energy_patt.search(line)
self.pcm['{} energy'.format(m.group(1))] = float(m.group(2))
elif total_patt.search(line):
m = total_patt.search(line)
self.pcm['Total energy'] = float(m.group(1))
elif parameter_patt.search(line):
m = parameter_patt.search(line)
self.pcm[m.group(1)] = float(m.group(2))
def as_dict(self):
"""
Json-serializable dict representation.
"""
structure = self.final_structure
d = {"has_gaussian_completed": self.properly_terminated,
"nsites": len(structure)}
comp = structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
d["is_pcm"] = self.is_pcm
d["errors"] = self.errors
d["Mulliken_charges"] = self.Mulliken_charges
unique_symbols = sorted(list(d["unit_cell_formula"].keys()))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["charge"] = self.charge
d["spin_multiplicity"] = self.spin_mult
vin = {"route": self.route, "functional": self.functional,
"basis_set": self.basis_set,
"nbasisfunctions": self.num_basis_func,
"pcm_parameters": self.pcm}
d["input"] = vin
nsites = len(self.final_structure)
vout = {
"energies": self.energies,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"molecule": structure.as_dict(),
"stationary_type": self.stationary_type,
"corrections": self.corrections
}
d['output'] = vout
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
def read_scan(self):
"""
Read a potential energy surface from a gaussian scan calculation.
Returns:
A dict: {"energies": [ values ],
"coords": {"d1": [ values ], "A2", [ values ], ... }}
"energies" are the energies of all points of the potential energy
surface. "coords" are the internal coordinates used to compute the
potential energy surface and the internal coordinates optimized,
labelled by their name as defined in the calculation.
"""
def floatList(l):
""" return a list of float from a list of string """
return [float(v) for v in l]
scan_patt = re.compile(r"^\sSummary of the potential surface scan:")
optscan_patt = re.compile(r"^\sSummary of Optimized Potential Surface Scan")
# data dict return
data = {"energies": list(), "coords": dict()}
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
while line != "":
if optscan_patt.match(line):
f.readline()
line = f.readline()
endScan = False
while not endScan:
data["energies"] += floatList(float_patt.findall(line))
line = f.readline()
while not re.search(r"(^\s+(\d+)|^\s-+)", line):
icname = line.split()[0].strip()
if icname in data["coords"]:
data["coords"][icname] += floatList(float_patt.findall(line))
else:
data["coords"][icname] = floatList(float_patt.findall(line))
line = f.readline()
if re.search(r"^\s-+", line):
endScan = True
else:
line = f.readline()
elif scan_patt.match(line):
line = f.readline()
data["coords"] = {icname: list() for icname in line.split()[1:-1]}
f.readline()
line = f.readline()
while not re.search(r"^\s-+", line):
values = floatList(line.split())
data["energies"].append(values[-1])
for i, icname in enumerate(data["coords"]):
data["coords"][icname].append(values[i+1])
line = f.readline()
else:
line = f.readline()
return data
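    # Illustrative sketch (not part of the original file); "scan.log" is a
    # hypothetical relaxed-scan output:
    #
    #     scan = GaussianOutput("scan.log").read_scan()
    #     # scan["energies"] -> list of energies along the scan
    #     # scan["coords"]   -> {coordinate name: list of values}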
def get_scan_plot(self, coords=None):
"""
Get a matplotlib plot of the potential energy surface.
Args:
            coords: internal coordinate name to use as abscissa.
"""
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
d = self.read_scan()
if coords and coords in d["coords"]:
x = d["coords"][coords]
plt.xlabel(coords)
else:
x = range(len(d["energies"]))
plt.xlabel("points")
plt.ylabel("Energy (eV)")
e_min = min(d["energies"])
y = [(e - e_min) * HARTREE_TO_ELECTRON_VOLT for e in d["energies"]]
plt.plot(x, y, "ro--")
return plt
def save_scan_plot(self, filename="scan.pdf", img_format="pdf", coords=None):
"""
Save matplotlib plot of the potential energy surface to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
            coords: internal coordinate name to use as abscissa.
"""
plt = self.get_scan_plot(coords)
plt.savefig(filename, format=img_format)
def read_excitation_energies(self):
"""
        Read the excitation energies after a TD-DFT calculation.
        Returns:
            A list: A list of tuples, one for each transition, such as
                    [(energy (eV), lambda (nm), oscillator strength), ... ]
"""
transitions = list()
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
td = False
while line != "":
if re.search(r"^\sExcitation energies and oscillator strengths:", line):
td = True
if td:
if re.search(r"^\sExcited State\s*\d", line):
val = [float(v) for v in float_patt.findall(line)]
transitions.append(tuple(val[0:3]))
line = f.readline()
return transitions
def get_spectre_plot(self, sigma=0.05, step=0.01):
"""
        Get a matplotlib plot of the UV-visible spectrum (xas). Transitions are
        plotted as vertical lines and as a sum of normal (gaussian) functions
        of width sigma. The broadening is applied in energy and the spectrum is
        plotted as a function of the wavelength.
Args:
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
Returns:
A dict: {"energies": values, "lambda": values, "xas": values}
                    where values are lists of abscissa (energies, lambda) and
                    the sum of gaussian functions (xas).
A matplotlib plot.
"""
from pymatgen.util.plotting import pretty_plot
from matplotlib.mlab import normpdf
plt = pretty_plot(12, 8)
transitions = self.read_excitation_energies()
minval = min([val[0] for val in transitions]) - 5.0 * sigma
maxval = max([val[0] for val in transitions]) + 5.0 * sigma
npts = int((maxval - minval) / step) + 1
eneval = np.linspace(minval, maxval, npts) # in eV
lambdaval = [cst.h * cst.c / (val * cst.e) * 1.e9 for val in eneval] # in nm
# sum of gaussian functions
spectre = np.zeros(npts)
for trans in transitions:
spectre += trans[2] * normpdf(eneval, trans[0], sigma)
spectre /= spectre.max()
plt.plot(lambdaval, spectre, "r-", label="spectre")
data = {"energies": eneval, "lambda": lambdaval, "xas": spectre}
# plot transitions as vlines
plt.vlines([val[1] for val in transitions], \
0., \
[val[2] for val in transitions], \
color="blue", \
label="transitions",
linewidth=2)
plt.xlabel("$\\lambda$ (nm)")
plt.ylabel("Arbitrary unit")
plt.legend()
return data, plt
def save_spectre_plot(self, filename="spectre.pdf", img_format="pdf",
sigma=0.05, step=0.01):
"""
Save matplotlib plot of the spectre to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
"""
d, plt = self.get_spectre_plot(sigma, step)
plt.savefig(filename, format=img_format)
def to_input(self, filename, mol=None, charge=None,
spin_multiplicity=None, title=None, functional=None,
basis_set=None, route_parameters=None, input_parameters=None,
link0_parameters=None, dieze_tag=None, cart_coords=False):
"""
        Write a new input file using, by default, the last geometry read from
        the output file and the same calculation parameters. Arguments are the
        same as for the GaussianInput class.
        Returns:
            gauinp (GaussianInput) : the gaussian input object
"""
if not mol:
mol = self.final_structure
if charge is None:
charge = self.charge
if spin_multiplicity is None:
spin_multiplicity = self.spin_mult
if not title:
title = "restart "
if not functional:
functional = self.functional
if not basis_set:
basis_set = self.basis_set
if not route_parameters:
route_parameters = self.route
if not link0_parameters:
link0_parameters = self.link0
if not dieze_tag:
dieze_tag = self.dieze_tag
gauinp = GaussianInput(mol=mol,
charge=charge,
spin_multiplicity=spin_multiplicity,
title=title,
functional=functional,
basis_set=basis_set,
route_parameters=route_parameters,
input_parameters=input_parameters,
link0_parameters=link0_parameters,
dieze_tag=dieze_tag)
gauinp.write_file(filename, cart_coords=cart_coords)
return gauinp
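    # Illustrative restart sketch (not part of the original file); "opt.log" is
    # a hypothetical output filename:
    #
    #     gout = GaussianOutput("opt.log")
    #     gout.to_input("restart.com", route_parameters={"SP": ""},
    #                   cart_coords=True)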
| mit |
teonlamont/mne-python | mne/viz/topo.py | 2 | 35647 | """Functions to plot M/EEG data on topo (one axes per channel)."""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
from copy import deepcopy
from functools import partial
from itertools import cycle
import numpy as np
from ..io.constants import Bunch
from ..io.pick import channel_type, pick_types
from ..utils import _clean_names, warn
from ..channels.layout import _merge_grad_data, _pair_grad_sensors, find_layout
from ..defaults import _handle_default
from .utils import (_check_delayed_ssp, _get_color_list, _draw_proj_checkbox,
add_background_image, plt_show, _setup_vmin_vmax,
DraggableColorbar, _set_ax_facecolor, _setup_ax_spines,
_check_cov, _plot_masked_image)
def iter_topography(info, layout=None, on_pick=None, fig=None,
fig_facecolor='k', axis_facecolor='k',
axis_spinecolor='k', layout_scale=None):
"""Create iterator over channel positions.
This function returns a generator that unpacks into
a series of matplotlib axis objects and data / channel
indices, both corresponding to the sensor positions
of the related layout passed or inferred from the channel info.
    `iter_topography` hence allows one to conveniently realize custom
    topography plots.
Parameters
----------
info : instance of Info
The measurement info.
layout : instance of mne.layout.Layout | None
The layout to use. If None, layout will be guessed
on_pick : callable | None
The callback function to be invoked on clicking one
        of the axes. It should have the signature
        `function(axis, channel_index)`.
fig : matplotlib.figure.Figure | None
The figure object to be considered. If None, a new
figure will be created.
fig_facecolor : str | obj
The figure face color. Defaults to black.
axis_facecolor : str | obj
The axis face color. Defaults to black.
axis_spinecolor : str | obj
The axis spine color. Defaults to black. In other words,
the color of the axis' edge lines.
    layout_scale : float | None
Scaling factor for adjusting the relative size of the layout
on the canvas. If None, nothing will be scaled.
Returns
-------
A generator that can be unpacked into:
ax : matplotlib.axis.Axis
The current axis of the topo plot.
ch_dx : int
The related channel index.
"""
return _iter_topography(info, layout, on_pick, fig, fig_facecolor,
axis_facecolor, axis_spinecolor, layout_scale)
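# Illustrative usage sketch (not part of the original module); `evoked` is
# assumed to be an mne.Evoked instance loaded elsewhere:
#
#     import matplotlib.pyplot as plt
#     for ax, ch_idx in iter_topography(evoked.info,
#                                       fig_facecolor='w',
#                                       axis_facecolor='w',
#                                       axis_spinecolor='w'):
#         ax.plot(evoked.times, evoked.data[ch_idx], color='r')
#     plt.show()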
def _iter_topography(info, layout, on_pick, fig, fig_facecolor='k',
axis_facecolor='k', axis_spinecolor='k',
layout_scale=None, unified=False, img=False, axes=None):
"""Iterate over topography.
Has the same parameters as iter_topography, plus:
unified : bool
If False (default), multiple matplotlib axes will be used.
If True, a single axis will be constructed. The former is
useful for custom plotting, the latter for speed.
"""
from matplotlib import pyplot as plt, collections
if fig is None:
fig = plt.figure()
def format_coord_unified(x, y, pos=None, ch_names=None):
"""Update status bar with channel name under cursor."""
# find candidate channels (ones that are down and left from cursor)
pdist = np.array([x, y]) - pos[:, :2]
pind = np.where((pdist >= 0).all(axis=1))[0]
if len(pind) > 0:
# find the closest channel
closest = pind[np.sum(pdist[pind, :]**2, axis=1).argmin()]
# check whether we are inside its box
in_box = (pdist[closest, :] < pos[closest, 2:]).all()
else:
in_box = False
return (('%s (click to magnify)' % ch_names[closest]) if
in_box else 'No channel here')
def format_coord_multiaxis(x, y, ch_name=None):
"""Update status bar with channel name under cursor."""
return '%s (click to magnify)' % ch_name
fig.set_facecolor(fig_facecolor)
if layout is None:
layout = find_layout(info)
if on_pick is not None:
callback = partial(_plot_topo_onpick, show_func=on_pick)
fig.canvas.mpl_connect('button_press_event', callback)
pos = layout.pos.copy()
if layout_scale:
pos[:, :2] *= layout_scale
ch_names = _clean_names(info['ch_names'])
iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
if unified:
if axes is None:
under_ax = plt.axes([0, 0, 1, 1])
under_ax.axis('off')
else:
under_ax = axes
under_ax.format_coord = partial(format_coord_unified, pos=pos,
ch_names=layout.names)
under_ax.set(xlim=[0, 1], ylim=[0, 1])
axs = list()
for idx, name in iter_ch:
ch_idx = ch_names.index(name)
if not unified: # old, slow way
ax = plt.axes(pos[idx])
ax.patch.set_facecolor(axis_facecolor)
plt.setp(list(ax.spines.values()), color=axis_spinecolor)
ax.set(xticklabels=[], yticklabels=[])
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
ax._mne_ch_name = name
ax._mne_ch_idx = ch_idx
ax._mne_ax_face_color = axis_facecolor
ax.format_coord = partial(format_coord_multiaxis, ch_name=name)
yield ax, ch_idx
else:
ax = Bunch(ax=under_ax, pos=pos[idx], data_lines=list(),
_mne_ch_name=name, _mne_ch_idx=ch_idx,
_mne_ax_face_color=axis_facecolor)
axs.append(ax)
if unified:
under_ax._mne_axs = axs
# Create a PolyCollection for the axis backgrounds
verts = np.transpose([pos[:, :2],
pos[:, :2] + pos[:, 2:] * [1, 0],
pos[:, :2] + pos[:, 2:],
pos[:, :2] + pos[:, 2:] * [0, 1],
], [1, 0, 2])
if not img:
under_ax.add_collection(collections.PolyCollection(
verts, facecolor=axis_facecolor, edgecolor=axis_spinecolor,
linewidth=1.)) # Not needed for image plots.
for ax in axs:
yield ax, ax._mne_ch_idx
def _plot_topo(info, times, show_func, click_func=None, layout=None,
vmin=None, vmax=None, ylim=None, colorbar=None, border='none',
axis_facecolor='k', fig_facecolor='k', cmap='RdBu_r',
layout_scale=None, title=None, x_label=None, y_label=None,
font_color='w', unified=False, img=False, axes=None):
"""Plot on sensor layout."""
import matplotlib.pyplot as plt
if layout.kind == 'custom':
layout = deepcopy(layout)
layout.pos[:, :2] -= layout.pos[:, :2].min(0)
layout.pos[:, :2] /= layout.pos[:, :2].max(0)
# prepare callbacks
tmin, tmax = times[[0, -1]]
click_func = show_func if click_func is None else click_func
on_pick = partial(click_func, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim, x_label=x_label,
y_label=y_label, colorbar=colorbar)
if axes is None:
fig = plt.figure()
axes = plt.axes([0.015, 0.025, 0.97, 0.95])
_set_ax_facecolor(axes, fig_facecolor)
else:
fig = axes.figure
if colorbar:
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
cb = fig.colorbar(sm, ax=axes, pad=0.025, fraction=0.075, shrink=0.5,
anchor=(-1, 0.5))
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cb_yticks, color=font_color)
axes.axis('off')
my_topo_plot = _iter_topography(info, layout=layout, on_pick=on_pick,
fig=fig, layout_scale=layout_scale,
axis_spinecolor=border,
axis_facecolor=axis_facecolor,
fig_facecolor=fig_facecolor,
unified=unified, img=img, axes=axes)
for ax, ch_idx in my_topo_plot:
if layout.kind == 'Vectorview-all' and ylim is not None:
this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
else:
ylim_ = ylim
show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim_)
if title is not None:
plt.figtext(0.03, 0.95, title, color=font_color, fontsize=15, va='top')
return fig
def _plot_topo_onpick(event, show_func):
"""Onpick callback that shows a single channel in a new figure."""
# make sure that the swipe gesture in OS-X doesn't open many figures
orig_ax = event.inaxes
import matplotlib.pyplot as plt
try:
if hasattr(orig_ax, '_mne_axs'): # in unified, single-axes mode
x, y = event.xdata, event.ydata
for ax in orig_ax._mne_axs:
if x >= ax.pos[0] and y >= ax.pos[1] and \
x <= ax.pos[0] + ax.pos[2] and \
y <= ax.pos[1] + ax.pos[3]:
orig_ax = ax
break
else:
# no axis found
return
elif not hasattr(orig_ax, '_mne_ch_idx'):
# neither old nor new mode
return
ch_idx = orig_ax._mne_ch_idx
face_color = orig_ax._mne_ax_face_color
fig, ax = plt.subplots(1)
plt.title(orig_ax._mne_ch_name)
_set_ax_facecolor(ax, face_color)
# allow custom function to override parameters
show_func(ax, ch_idx)
except Exception as err:
        # matplotlib silently ignores exceptions in event handlers,
        # so we print the error here to know what went wrong
print(err)
raise
def _compute_scalings(bn, xlim, ylim):
"""Compute scale factors for a unified plot."""
if isinstance(ylim[0], (tuple, list, np.ndarray)):
ylim = (ylim[0][0], ylim[1][0])
pos = bn.pos
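    # bn.pos is (x, y, width, height) of this channel's box within the unified
    # axes; the affine map x_t + x_s * x sends xlim onto
    # [pos[0], pos[0] + pos[2]] (and similarly for y)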
bn.x_s = pos[2] / (xlim[1] - xlim[0])
bn.x_t = pos[0] - bn.x_s * xlim[0]
bn.y_s = pos[3] / (ylim[1] - ylim[0])
bn.y_t = pos[1] - bn.y_s * ylim[0]
def _check_vlim(vlim):
"""Check the vlim."""
return not np.isscalar(vlim) and vlim is not None
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, onselect, ylim=None,
tfr=None, freq=None, x_label=None, y_label=None,
colorbar=False, cmap=('RdBu_r', True), yscale='auto',
mask=None, mask_style="both", mask_cmap="Greys",
mask_alpha=0.1, is_jointplot=False):
"""Show time-frequency map as two-dimensional image."""
from matplotlib import pyplot as plt
from matplotlib.widgets import RectangleSelector
if yscale not in ['auto', 'linear', 'log']:
raise ValueError("yscale should be either 'auto', 'linear', or 'log'"
", got {}".format(yscale))
cmap, interactive_cmap = cmap
times = np.linspace(tmin, tmax, num=tfr[ch_idx].shape[1])
img, t_end = _plot_masked_image(
ax, tfr[ch_idx], times, mask, picks=None, yvals=freq, cmap=cmap,
vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
mask_cmap=mask_cmap, yscale=yscale)
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if colorbar:
if isinstance(colorbar, DraggableColorbar):
cbar = colorbar.cbar # this happens with multiaxes case
else:
cbar = plt.colorbar(mappable=img)
if interactive_cmap:
ax.CB = DraggableColorbar(cbar, img)
ax.RS = RectangleSelector(ax, onselect=onselect) # reference must be kept
return t_end
def _imshow_tfr_unified(bn, ch_idx, tmin, tmax, vmin, vmax, onselect,
ylim=None, tfr=None, freq=None, vline=None,
x_label=None, y_label=None, colorbar=False,
picker=True, cmap='RdBu_r', title=None, hline=None):
"""Show multiple tfrs on topo using a single axes."""
_compute_scalings(bn, (tmin, tmax), (freq[0], freq[-1]))
ax = bn.ax
data_lines = bn.data_lines
extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax,
bn.y_t + bn.y_s * freq[0], bn.y_t + bn.y_s * freq[-1])
data_lines.append(ax.imshow(tfr[ch_idx], clip_on=True, clip_box=bn.pos,
extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, cmap=cmap))
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
times, vline=None, x_label=None, y_label=None,
colorbar=False, hline=None, hvline_color='w',
labels=None):
"""Show time series on topo split across multiple axes."""
import matplotlib.pyplot as plt
from matplotlib.colors import colorConverter
picker_flag = False
for data_, color_ in zip(data, color):
if not picker_flag:
# use large tol for picker so we can click anywhere in the axes
ax.plot(times, data_[ch_idx], color=color_, picker=1e9)
picker_flag = True
else:
ax.plot(times, data_[ch_idx], color=color_)
if x_label is not None:
ax.set(xlabel=x_label)
if y_label is not None:
if isinstance(y_label, list):
ax.set_ylabel(y_label[ch_idx])
else:
ax.set_ylabel(y_label)
def _format_coord(x, y, labels, ax):
"""Create status string based on cursor coordinates."""
idx = np.abs(times - x).argmin()
ylabel = ax.get_ylabel()
unit = (ylabel[ylabel.find('(') + 1:ylabel.find(')')]
if '(' in ylabel and ')' in ylabel else '')
labels = [''] * len(data) if labels is None else labels
# try to estimate whether to truncate condition labels
slen = 10 + sum([12 + len(unit) + len(label) for label in labels])
bar_width = (ax.figure.get_size_inches() * ax.figure.dpi)[0] / 5.5
trunc_labels = bar_width < slen
s = '%6.3f s: ' % times[idx]
for data_, label in zip(data, labels):
s += '%7.2f %s' % (data_[ch_idx, idx], unit)
if trunc_labels:
label = (label if len(label) <= 10 else
'%s..%s' % (label[:6], label[-2:]))
s += ' [%s] ' % label if label else ' '
return s
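    # (added, illustrative: with unit 'fT' and a hypothetical condition label
    #  'condition-one' this produces a status-bar string roughly like
    #  ' 0.120 s:  145.32 fT [condit..ne] ')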
ax.format_coord = lambda x, y: _format_coord(x, y, labels=labels, ax=ax)
def _cursor_vline(event):
"""Draw cursor (vertical line)."""
ax = event.inaxes
if not ax:
return
if ax._cursorline is not None:
ax._cursorline.remove()
ax._cursorline = ax.axvline(event.xdata, color=ax._cursorcolor)
ax.figure.canvas.draw()
def _rm_cursor(event):
ax = event.inaxes
if ax._cursorline is not None:
ax._cursorline.remove()
ax._cursorline = None
ax.figure.canvas.draw()
ax._cursorline = None
# choose cursor color based on perceived brightness of background
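    # (added note: the 299/587/114 weights are the ITU-R BT.601 luma
    #  coefficients scaled by 1000; with rgb values in [0, 1] the dot product
    #  ranges from 0 to 1000, and 150 is used as the dark/light threshold)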
try:
facecol = colorConverter.to_rgb(ax.get_facecolor())
except AttributeError: # older MPL
facecol = colorConverter.to_rgb(ax.get_axis_bgcolor())
face_brightness = np.dot(facecol, np.array([299, 587, 114]))
ax._cursorcolor = 'white' if face_brightness < 150 else 'black'
plt.connect('motion_notify_event', _cursor_vline)
plt.connect('axes_leave_event', _rm_cursor)
_setup_ax_spines(ax, vline, tmin, tmax)
    ax.figure.set_facecolor('k' if hvline_color == 'w' else 'w')
ax.spines['bottom'].set_color(hvline_color)
ax.spines['left'].set_color(hvline_color)
ax.tick_params(axis='x', colors=hvline_color, which='both')
ax.tick_params(axis='y', colors=hvline_color, which='both')
ax.title.set_color(hvline_color)
ax.xaxis.label.set_color(hvline_color)
ax.yaxis.label.set_color(hvline_color)
if vline:
plt.axvline(vline, color=hvline_color, linewidth=1.0,
linestyle='--')
if hline:
plt.axhline(hline, color=hvline_color, linewidth=1.0, zorder=10)
if colorbar:
plt.colorbar()
def _plot_timeseries_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim, data,
color, times, vline=None, x_label=None,
y_label=None, colorbar=False, hline=None,
hvline_color='w'):
"""Show multiple time series on topo using a single axes."""
import matplotlib.pyplot as plt
if not (ylim and not any(v is None for v in ylim)):
ylim = np.array([np.min(data), np.max(data)])
# Translation and scale parameters to take data->under_ax normalized coords
_compute_scalings(bn, (tmin, tmax), ylim)
pos = bn.pos
data_lines = bn.data_lines
ax = bn.ax
# XXX These calls could probably be made faster by using collections
for data_, color_ in zip(data, color):
data_lines.append(ax.plot(
bn.x_t + bn.x_s * times, bn.y_t + bn.y_s * data_[ch_idx],
linewidth=0.5, color=color_, clip_on=True, clip_box=pos)[0])
if vline:
vline = np.array(vline) * bn.x_s + bn.x_t
ax.vlines(vline, pos[1], pos[1] + pos[3], color=hvline_color,
linewidth=0.5, linestyle='--')
if hline:
hline = np.array(hline) * bn.y_s + bn.y_t
ax.hlines(hline, pos[0], pos[0] + pos[2], color=hvline_color,
linewidth=0.5)
if x_label is not None:
ax.text(pos[0] + pos[2] / 2., pos[1], x_label,
horizontalalignment='center', verticalalignment='top')
if y_label is not None:
y_label = y_label[ch_idx] if isinstance(y_label, list) else y_label
ax.text(pos[0], pos[1] + pos[3] / 2., y_label,
                horizontalalignment='right', verticalalignment='middle',
rotation=90)
if colorbar:
plt.colorbar()
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, data=None,
epochs=None, sigma=None, order=None, scalings=None,
vline=None, x_label=None, y_label=None, colorbar=False,
cmap='RdBu_r'):
"""Plot erfimage on sensor topography."""
from scipy import ndimage
import matplotlib.pyplot as plt
this_data = data[:, ch_idx, :].copy() * scalings[ch_idx]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
img = ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax,
picker=True, cmap=cmap, interpolation='nearest')
ax = plt.gca()
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if colorbar:
plt.colorbar(mappable=img)
def _erfimage_imshow_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
data=None, epochs=None, sigma=None, order=None,
scalings=None, vline=None, x_label=None,
y_label=None, colorbar=False, cmap='RdBu_r'):
"""Plot erfimage topography using a single axis."""
from scipy import ndimage
_compute_scalings(bn, (tmin, tmax), (0, len(epochs.events)))
ax = bn.ax
data_lines = bn.data_lines
extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax, bn.y_t,
bn.y_t + bn.y_s * len(epochs.events))
this_data = data[:, ch_idx, :].copy() * scalings[ch_idx]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
data_lines.append(ax.imshow(this_data, extent=extent, aspect='auto',
origin='lower', vmin=vmin, vmax=vmax,
picker=True, cmap=cmap,
interpolation='nearest'))
def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=(0.,), hline=(0.,), fig_facecolor='k',
fig_background=None, axis_facecolor='k', font_color='w',
merge_grads=False, legend=True, axes=None, show=True,
noise_cov=None):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale: float
Scaling factor for adjusting the relative size of the layout
on the canvas
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad. If None,
the ylim parameter for each channel is determined by the maximum
absolute peak.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
hline : list of floats | None
The values at which to show a horizontal line.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
axis_facecolor : str | obj
The face color to be used for each sensor plot. Defaults to black.
font_color : str | obj
The color of text in the colorbar and title. Defaults to white.
merge_grads : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
legend : bool | int | string | tuple
If True, create a legend based on evoked.comment. If False, disable the
legend. Otherwise, the legend is created and the parameter value is
passed as the location parameter to the matplotlib legend call. It can
be an integer (e.g. 0 corresponds to upper right corner of the plot),
a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
lower left corner of the legend in the axes coordinate system).
See matplotlib documentation for more details.
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
show : bool
Show figure if True.
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels names are shown in italic.
Can be a string to load a covariance from disk.
.. versionadded:: 0.16.0
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
import matplotlib.pyplot as plt
from ..cov import whiten_evoked
if not type(evoked) in (tuple, list):
evoked = [evoked]
if type(color) in (tuple, list):
if len(color) != len(evoked):
raise ValueError('Lists of evoked objects and colors'
' must have the same length')
elif color is None:
        colors = ['w'] + _get_color_list()
stop = (slice(len(evoked)) if len(evoked) < len(colors)
else slice(len(colors)))
color = cycle(colors[stop])
if len(evoked) > len(colors):
warn('More evoked objects than colors available. You should pass '
'a list of unique colors.')
else:
color = cycle([color])
times = evoked[0].times
if not all((e.times == times).all() for e in evoked):
raise ValueError('All evoked.times must be the same')
noise_cov = _check_cov(noise_cov, evoked[0].info)
if noise_cov is not None:
evoked = [whiten_evoked(e, noise_cov) for e in evoked]
else:
evoked = [e.copy() for e in evoked]
info = evoked[0].info
ch_names = evoked[0].ch_names
scalings = _handle_default('scalings', scalings)
if not all(e.ch_names == ch_names for e in evoked):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if merge_grads:
picks = _pair_grad_sensors(info, topomap_coords=False)
chs = list()
for pick in picks[::2]:
ch = info['chs'][pick]
ch['ch_name'] = ch['ch_name'][:-1] + 'X'
chs.append(ch)
info['chs'] = chs
info['bads'] = list() # bads dropped on pair_grad_sensors
info._update_redundant()
info._check_consistency()
new_picks = list()
for e in evoked:
data = _merge_grad_data(e.data[picks])
if noise_cov is None:
data *= scalings['grad']
e.data = data
new_picks.append(range(len(data)))
picks = new_picks
types_used = ['grad']
unit = _handle_default('units')['grad'] if noise_cov is None else 'NA'
y_label = 'RMS amplitude (%s)' % unit
if layout is None:
layout = find_layout(info)
if not merge_grads:
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = set(layout.names) & set(ch_names)
types_used = set(channel_type(info, ch_names.index(ch))
for ch in chs_in_layout)
# remove possible reference meg channels
types_used = set.difference(types_used, set('ref_meg'))
# one check for all vendors
meg_types = set(('mag', 'grad'))
is_meg = len(set.intersection(types_used, meg_types)) > 0
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
for kk in types_used]
else:
types_used_kwargs = dict((t, True) for t in types_used)
picks = [pick_types(info, meg=False, exclude=[],
**types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
if noise_cov is None:
for e in evoked:
for pick, ch_type in zip(picks, types_used):
e.data[pick] *= scalings[ch_type]
if proj is True and all(e.proj is not True for e in evoked):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
# Y labels for picked plots must be reconstructed
y_label = list()
for ch_idx in range(len(chs_in_layout)):
if noise_cov is None:
unit = _handle_default('units')[channel_type(info, ch_idx)]
else:
unit = 'NA'
y_label.append('Amplitude (%s)' % unit)
if ylim is None:
def set_ylim(x):
return np.abs(x).max()
ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
ymax = np.array(ylim_)
ylim_ = (-ymax, ymax)
elif isinstance(ylim, dict):
ylim_ = _handle_default('ylim', ylim)
ylim_ = [ylim_[kk] for kk in types_used]
# extra unpack to avoid bug #1700
if len(ylim_) == 1:
ylim_ = ylim_[0]
else:
ylim_ = zip(*[np.array(yl) for yl in ylim_])
else:
raise TypeError('ylim must be None or a dict. Got %s.' % type(ylim))
data = [e.data for e in evoked]
comments = [e.comment for e in evoked]
show_func = partial(_plot_timeseries_unified, data=data, color=color,
times=times, vline=vline, hline=hline,
hvline_color=font_color)
click_func = partial(_plot_timeseries, data=data, color=color, times=times,
vline=vline, hline=hline, hvline_color=font_color,
labels=comments)
fig = _plot_topo(info=info, times=times, show_func=show_func,
click_func=click_func, layout=layout, colorbar=False,
ylim=ylim_, cmap=None, layout_scale=layout_scale,
border=border, fig_facecolor=fig_facecolor,
font_color=font_color, axis_facecolor=axis_facecolor,
title=title, x_label='Time (s)', y_label=y_label,
unified=True, axes=axes)
add_background_image(fig, fig_background)
if legend is not False:
legend_loc = 0 if legend is True else legend
labels = [e.comment if e.comment else 'Unknown' for e in evoked]
legend = plt.legend(labels, loc=legend_loc,
prop={'size': 10})
legend.get_frame().set_facecolor(axis_facecolor)
txts = legend.get_texts()
for txt, col in zip(txts, color):
txt.set_color(col)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo_proj,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
plt_show(show)
return fig
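# Usage sketch (added, illustrative only; ``evoked_aud``/``evoked_vis`` are
# placeholder Evoked objects): this private helper backs the public
# ``mne.viz.plot_evoked_topo`` / ``Evoked.plot_topo`` wrappers, e.g.
#     fig = _plot_evoked_topo([evoked_aud, evoked_vis], color=['y', 'g'],
#                             ylim=dict(grad=[-100, 100]), title='Contrast')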
def _plot_update_evoked_topo_proj(params, bools):
"""Update topo sensor plots."""
evokeds = [e.copy() for e in params['evokeds']]
fig = params['fig']
projs = [proj for proj, b in zip(params['projs'], bools) if b]
params['proj_bools'] = bools
for e in evokeds:
e.add_proj(projs, remove_existing=True)
e.apply_proj()
# make sure to only modify the time courses, not the ticks
for ax in fig.axes[0]._mne_axs:
for line, evoked in zip(ax.data_lines, evokeds):
line.set_ydata(ax.y_t + ax.y_s * evoked.data[ax._mne_ch_idx])
fig.canvas.draw()
def plot_topo_image_epochs(epochs, layout=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k',
fig_background=None, font_color='w', show=True):
"""Plot Event Related Potential / Fields image on topographies.
Parameters
----------
epochs : instance of Epochs
The epochs.
layout: instance of Layout
System specific sensor positions.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
        the epoch axis of the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
    cmap : instance of matplotlib.colors.Colormap
Colors to be mapped to the values.
layout_scale: float
scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
scalings = _handle_default('scalings', scalings)
data = epochs.get_data()
scale_coeffs = list()
for idx in range(epochs.info['nchan']):
ch_type = channel_type(epochs.info, idx)
scale_coeffs.append(scalings.get(ch_type, 1))
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
if layout is None:
layout = find_layout(epochs.info)
show_func = partial(_erfimage_imshow_unified, scalings=scale_coeffs,
order=order, data=data, epochs=epochs, sigma=sigma,
cmap=cmap)
erf_imshow = partial(_erfimage_imshow, scalings=scale_coeffs, order=order,
data=data, epochs=epochs, sigma=sigma, cmap=cmap)
fig = _plot_topo(info=epochs.info, times=epochs.times,
click_func=erf_imshow, show_func=show_func, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
fig_facecolor=fig_facecolor, font_color=font_color,
border=border, x_label='Time (s)', y_label='Epoch',
unified=True, img=True)
add_background_image(fig, fig_background)
plt_show(show)
return fig
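# Usage sketch (added, illustrative only; ``epochs`` is a placeholder Epochs
# object):
#     fig = plot_topo_image_epochs(epochs, sigma=0.5, vmin=-250, vmax=250,
#                                  colorbar=True, title='ERF images')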
| bsd-3-clause |
stefanloock/pyshearlab | examples/adjoint.py | 1 | 1348 | def tic():
#Homemade version of matlab tic and toc functions
import time
global startTime_for_tictoc
startTime_for_tictoc = time.time()
def toc():
import time
if 'startTime_for_tictoc' in globals():
print("Elapsed time is " + str(time.time() - startTime_for_tictoc) + " seconds.")
else:
print("Toc: start time not set")
import numpy as np
from scipy import ndimage as img
from scipy import io as sio
import matplotlib.pyplot as plt
import pyshearlab
tic()
print("--Testing adjoint")
print("loading image...")
sigma = 30
scales = 3
thresholdingFactor = 3
# load data
X = img.imread("barbara.jpg")[::4, ::4]
X = X.astype(float)
# add noise
Xnoisy = X + sigma*np.random.randn(X.shape[0], X.shape[1])
toc()
tic()
print("generating shearlet system...")
## create shearlets
shearletSystem = pyshearlab.SLgetShearletSystem2D(0,X.shape[0], X.shape[1], scales)
toc()
tic()
print("decomposition, thresholding and reconstruction...")
# decomposition
coeffs = pyshearlab.SLsheardec2D(Xnoisy, shearletSystem)
# thresholding
oldCoeffs = coeffs.copy()
weights = np.ones(coeffs.shape)
# reconstruction
Xadj = pyshearlab.SLshearadjoint2D(coeffs, shearletSystem)
# Validate adjoint equation
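# (added note: for a linear operator A with adjoint A*, <A x, y> = <x, A* y>;
#  choosing y = A x makes the two inner products printed below equal up to
#  numerical error)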
print('<Ax, Ax> = {}, <x, AtAx> = {}, should be equal'.format(
np.vdot(coeffs, coeffs), np.vdot(Xnoisy, Xadj))) | gpl-3.0 |
Molecular-Image-Recognition/Molecular-Image-Recognition | code/rmgpy/cantherm/kinetics.py | 2 | 11458 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green ([email protected]),
# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import os.path
import numpy
import logging
from rmgpy.cantherm.output import prettify
from rmgpy.kinetics.arrhenius import Arrhenius, ArrheniusEP, PDepArrhenius, MultiArrhenius, MultiPDepArrhenius
from rmgpy.kinetics.chebyshev import Chebyshev
from rmgpy.kinetics.falloff import ThirdBody, Lindemann, Troe
from rmgpy.kinetics.kineticsdata import KineticsData, PDepKineticsData
from rmgpy.kinetics.tunneling import Wigner, Eckart
import rmgpy.quantity as quantity
import rmgpy.constants as constants
################################################################################
class KineticsJob:
"""
A representation of a CanTherm kinetics job. This job is used to compute
and save the high-pressure-limit kinetics information for a single reaction.
"""
def __init__(self, reaction,
Tmin=None,
Tmax=None,
Tlist=None,
Tcount=0):
if Tmin is not None:
self.Tmin = quantity.Quantity(Tmin)
else:
self.Tmin = None
if Tmax is not None:
self.Tmax = quantity.Quantity(Tmax)
else:
self.Tmax = None
self.Tcount = Tcount
if Tlist is not None:
self.Tlist = quantity.Quantity(Tlist)
self.Tmin = quantity.Quantity(numpy.min(self.Tlist.value_si),"K")
self.Tmax = quantity.Quantity(numpy.max(self.Tlist.value_si),"K")
self.Tcount = len(self.Tlist.value_si)
else:
            if Tmin is not None and Tmax is not None:
if self.Tcount <= 3.:
self.Tcount = 50
stepsize = (self.Tmax.value_si-self.Tmin.value_si)/self.Tcount
self.Tlist = quantity.Quantity(numpy.arange(self.Tmin.value_si, self.Tmax.value_si+stepsize, stepsize),"K")
else:
self.Tlist = None
self.reaction = reaction
self.kunits = None
@property
def Tmin(self):
"""The minimum temperature at which the computed k(T) values are valid, or ``None`` if not defined."""
return self._Tmin
@Tmin.setter
def Tmin(self, value):
self._Tmin = quantity.Temperature(value)
@property
def Tmax(self):
"""The maximum temperature at which the computed k(T) values are valid, or ``None`` if not defined."""
return self._Tmax
@Tmax.setter
def Tmax(self, value):
self._Tmax = quantity.Temperature(value)
@property
def Tlist(self):
"""The temperatures at which the k(T) values are computed."""
return self._Tlist
@Tlist.setter
def Tlist(self, value):
self._Tlist = quantity.Temperature(value)
def execute(self, outputFile=None, plot=False):
"""
Execute the kinetics job, saving the results to the given `outputFile`
on disk.
"""
if self.Tlist is not None:
self.generateKinetics(self.Tlist.value_si)
else:
self.generateKinetics()
if outputFile is not None:
self.save(outputFile)
if plot:
self.plot(os.path.dirname(outputFile))
def generateKinetics(self,Tlist=None):
"""
Generate the kinetics data for the reaction and fit it to a modified
Arrhenius model.
"""
kineticsClass = 'Arrhenius'
tunneling = self.reaction.transitionState.tunneling
if isinstance(tunneling, Wigner) and tunneling.frequency is None:
tunneling.frequency = (self.reaction.transitionState.frequency.value_si,"cm^-1")
elif isinstance(tunneling, Eckart) and tunneling.frequency is None:
tunneling.frequency = (self.reaction.transitionState.frequency.value_si,"cm^-1")
tunneling.E0_reac = (sum([reactant.conformer.E0.value_si for reactant in self.reaction.reactants])*0.001,"kJ/mol")
tunneling.E0_TS = (self.reaction.transitionState.conformer.E0.value_si*0.001,"kJ/mol")
tunneling.E0_prod = (sum([product.conformer.E0.value_si for product in self.reaction.products])*0.001,"kJ/mol")
elif tunneling is not None:
raise ValueError('Unknown tunneling model {0!r}.'.format(tunneling))
        logging.info('Generating {0} kinetics model for {1}...'.format(kineticsClass, self.reaction))
if Tlist is None:
Tlist = 1000.0/numpy.arange(0.4, 3.35, 0.05)
klist = numpy.zeros_like(Tlist)
for i in range(Tlist.shape[0]):
klist[i] = self.reaction.calculateTSTRateCoefficient(Tlist[i])
order = len(self.reaction.reactants)
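        # convert the TST rate coefficient from SI (m^3-based) to cm^3-based
        # units: each reaction order above 1 contributes a factor of 1e6 cm^3
        # per m^3, matching the kunits lookup below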
klist *= 1e6 ** (order-1)
self.kunits = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[order]
self.Kequnits = {2:'mol^2/cm^6', 1:'mol/cm^3', 0:' ', -1:'cm^3/mol', -2:'cm^6/mol^2'}[len(self.reaction.products)-len(self.reaction.reactants)]
self.krunits = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[len(self.reaction.products)]
self.reaction.kinetics = Arrhenius().fitToData(Tlist, klist, kunits=self.kunits)
def save(self, outputFile):
"""
Save the results of the kinetics job to the file located
        at `outputFile` on disk.
"""
reaction = self.reaction
ks = []
k0s = []
k0revs = []
krevs = []
logging.info('Saving kinetics for {0}...'.format(reaction))
order = len(self.reaction.reactants)
factor = 1e6 ** (order-1)
f = open(outputFile, 'a')
f.write('# ======= =========== =========== =========== ===============\n')
f.write('# Temp. k (TST) Tunneling k (TST+T) Units\n')
f.write('# ======= =========== =========== =========== ===============\n')
if self.Tlist is None:
Tlist = numpy.array([300,400,500,600,800,1000,1500,2000])
else:
            Tlist = self.Tlist.value_si
for T in Tlist:
tunneling = reaction.transitionState.tunneling
reaction.transitionState.tunneling = None
k0 = reaction.calculateTSTRateCoefficient(T) * factor
reaction.transitionState.tunneling = tunneling
k = reaction.calculateTSTRateCoefficient(T) * factor
tunneling = reaction.transitionState.tunneling
kappa = k / k0
ks.append(k)
k0s.append(k0)
f.write('# {0:4g} K {1:11.3e} {2:11g} {3:11.3e} {4}\n'.format(T, k0, kappa, k, self.kunits))
f.write('# ======= =========== =========== =========== ===============\n')
f.write('\n\n')
f.write('# ======= ============ =========== ============ ============= =========\n')
f.write('# Temp. Kc (eq) Units krev (TST) krev (TST+T) Units\n')
f.write('# ======= ============ =========== ============ ============= =========\n')
for n,T in enumerate(Tlist):
k = ks[n]
k0 = k0s[n]
Keq = reaction.getEquilibriumConstant(T)
k0rev = k0/Keq
krev = k/Keq
k0revs.append(k0rev)
krevs.append(krev)
f.write('# {0:4g} K {1:11.3e} {2} {3:11.3e} {4:11.3e} {5}\n'.format(T, Keq, self.Kequnits, k0rev, krev, self.krunits))
f.write('# ======= ============ =========== ============ ============= =========\n')
f.write('\n\n')
kinetics0rev = Arrhenius().fitToData(Tlist, numpy.array(k0revs), kunits=self.krunits)
kineticsrev = Arrhenius().fitToData(Tlist, numpy.array(krevs), kunits=self.krunits)
f.write('# krev (TST) = {0} \n'.format(kinetics0rev))
f.write('# krev (TST+T) = {0} \n\n'.format(kineticsrev))
# Reaction path degeneracy is INCLUDED in the kinetics itself!
string = 'kinetics(label={0!r}, kinetics={1!r})'.format(reaction.label, reaction.kinetics)
f.write('{0}\n\n'.format(prettify(string)))
f.close()
# Also save the result to chem.inp
f = open(os.path.join(os.path.dirname(outputFile), 'chem.inp'), 'a')
reaction = self.reaction
kinetics = reaction.kinetics
string = '{0!s:51} {1:9.3e} {2:9.3f} {3:9.3f}\n'.format(
reaction,
kinetics.A.value_si * factor,
kinetics.n.value_si,
kinetics.Ea.value_si / 4184.,
)
f.write('{0}\n'.format(string))
f.close()
def plot(self, outputDirectory):
"""
Plot both the raw kinetics data and the Arrhenius fit versus
temperature. The plot is saved to the file ``kinetics.pdf`` in the
output directory. The plot is not generated if ``matplotlib`` is not
installed.
"""
# Skip this step if matplotlib is not installed
try:
import pylab
except ImportError:
return
Tlist = 1000.0/numpy.arange(0.4, 3.35, 0.05)
klist = numpy.zeros_like(Tlist)
klist2 = numpy.zeros_like(Tlist)
for i in range(Tlist.shape[0]):
klist[i] = self.reaction.calculateTSTRateCoefficient(Tlist[i])
klist2[i] = self.reaction.kinetics.getRateCoefficient(Tlist[i])
order = len(self.reaction.reactants)
klist *= 1e6 ** (order-1)
klist2 *= 1e6 ** (order-1)
pylab.semilogy(1000.0 / Tlist, klist, 'ok')
pylab.semilogy(1000.0 / Tlist, klist2, '-k')
pylab.xlabel('1000 / Temperature (1000/K)')
pylab.ylabel('Rate coefficient ({0})'.format(self.kunits))
pylab.savefig(os.path.join(outputDirectory, 'kinetics.pdf'))
pylab.close()
| mit |
lukefrasera/cs775Homework | hw_003/scripts/fisher_classify.py | 1 | 6148 | #!/usr/bin/env python
import numpy as np
from numpy import linalg as la
import matplotlib.pyplot as plt
import argparse
import os
import pdb
from scipy import spatial
import time
import operator
def ParseData(raw_data, class1, class2):
raw_data = raw_data.rstrip('\n')
raw_data_list = raw_data.split('\n')
data_list = list()
for raw_data_point in raw_data_list:
raw_data_point = raw_data_point.rstrip()
point = raw_data_point.split(' ')
data_list.append([float(x) for x in point])
data_list.pop()
data_list_np = np.array(data_list)
mask = (data_list_np[:, 0] == class1) + (data_list_np[:, 0] == class2)
data_list_np = data_list_np[mask]
return data_list_np
def FisherClassifier(features, classification, test_data, classa, classb):
'''
:param features:
:param classification:
:param test_data:
:return:
'''
# separate classes
class_a_features = features[classification == classa]
class_b_features = features[classification == classb]
class_a_mean = np.mean(class_a_features, 0).T
class_a_cov = np.cov(class_a_features.T)
class_b_mean = np.mean(class_b_features, 0).T
class_b_cov = np.cov(class_b_features.T)
# compute the Fisher criteria projection to one dimension
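    # (added note: the direction computed below is w ~ (S_a + S_b)^{-1} (mu_a - mu_b),
    #  the usual two-class Fisher discriminant, normalized to unit length)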
project_a = la.inv(class_a_cov + class_b_cov) * (class_a_mean - class_b_mean)
project_a = project_a / la.norm(project_a)
# project all of the data
class_a_project = class_a_features * project_a
class_b_project = class_b_features * project_a
class_a_gauss_build = GaussianBuild(class_a_project)
class_b_gauss_build = GaussianBuild(class_b_project)
# class_a_prob = []
# class_b_prob = []
classification_result = []
for sample in test_data:
sample_project = sample * project_a
class_a_prob = ComputeGaussianProbability(class_a_gauss_build[0], class_a_gauss_build[1], sample_project)
class_b_prob = ComputeGaussianProbability(class_b_gauss_build[0], class_b_gauss_build[1], sample_project)
if class_a_prob > class_b_prob:
classification_result.append(classa)
else:
classification_result.append(classb)
return classification_result
def GaussianBuild(features):
"""
    computes the mean and covariance for a dataset
    :param features: s x f np.matrix (s samples by f features)
    :return: [covariance (f x f), mean (f x 1)]
"""
#pdb.set_trace()
    print 'Fitting Gaussian to', features.shape[0], 'projected samples'
cov_mat = np.cov(features.T)
mean_mat = np.mean(features.T)
return [cov_mat, mean_mat]
def ComputeGaussianProbability(cov_mat, mean_mat, sample):
"""
computes the probability of a particular sample belonging to a particular gaussian distribution
:param cov_mat: f x f np.matrix (f features)
:param mean_mat: f x 1 np.matrix
:param sample: f x 1 np.matrix
:return:
"""
mean_mat = np.matrix(mean_mat).T
sample = sample.T
# sample = meanMat
non_invertible = True
eye_scale = 0.0
cov_mat_inverse = 1.0 / cov_mat
probability = 1.0 / (np.sqrt(la.norm(2 * np.pi * cov_mat)))
probability *= np.exp(-0.5 * (sample - mean_mat).T * cov_mat_inverse * (sample - mean_mat))
return probability
def main():
parser = argparse.ArgumentParser(description='Process input')
parser.add_argument('-t', '--training_file', type=str, help='submit data to train against')
parser.add_argument('-f', '--testing_file', type=str, help='submit data to test the trained model against')
parser.add_argument('-s', '--save_model', type=str, help='save out trained model')
parser.add_argument('-r', '--read_model', type=str, help='read in trained model')
parser.add_argument('-a', '--classa', type=int, help='class to test/train on')
parser.add_argument('-b', '--classb', type=int, help='class to test/train on')
parser.add_argument('-m', '--method', type=int, help='0=Fisher Discriminant')
args = parser.parse_args()
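    # Example invocation (added, illustrative; the data file names are
    # placeholders):
    #   python fisher_classify.py -t zip.train -f zip.test -a 3 -b 5 -m 0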
print os.getcwd()
# Check if Arguments allow execution
if (not args.training_file) and (not args.read_model):
print "Error: No training Data or model present!"
return -1
if args.training_file and args.read_model:
print "Error: cannot read model and traing data at the same time!"
return -1
if args.training_file:
# trainagainst training file
if not os.path.isfile(args.training_file):
print "Error: Training file doesn't exist!"
return -1
# train
with open(args.training_file) as file:
# read file contents
raw_data = file.read()
# parse data
data = ParseData(raw_data, args.classa, args.classb)
# plt.imshow(data[0,1:].reshape(1,256), cmap = plt.cm.Greys, interpolation = 'nearest')
# plt.show()
# train on data
classification = data[:, 0]
features = np.matrix(data[:, 1:])
if args.testing_file:
with open(args.testing_file) as test_file:
raw_test_data = test_file.read()
test_data = ParseData(raw_test_data, args.classa, args.classb)
test_data_truth = test_data[:, 0]
test_data = np.matrix(test_data[:, 1:])
if args.method == 0:
result = FisherClassifier(features, classification, test_data, args.classa, args.classb)
print result
print [int(x) for x in list(test_data_truth)]
errors = np.array(result) == test_data_truth
class_a_samples = errors[test_data_truth == args.classa]
class_b_samples = errors[test_data_truth == args.classb]
num_a_correct = np.sum(class_a_samples)
num_b_correct = np.sum(class_b_samples)
total_a = class_a_samples.shape[0]
total_b = class_b_samples.shape[0]
print (1.0-float(num_a_correct)/total_a)*100,'%% of class a was misclassified'
print (1.0-float(num_b_correct)/total_b)*100,'%% of class b was misclassified'
if __name__ == '__main__':
main()
| lgpl-3.0 |
hippke/TTV-TDV-exomoons | create_figures/system_viii.py | 1 | 7068 | """n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 1000
timestep = seconds_per_day / timesteps_per_day
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
# Update velocities based upon on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
# Move resulting data from lists to numpy arrays
ttv_array = numpy.array([])
ttv_array = ttv_list
tdv_array = numpy.array([])
tdv_array = tdv_list
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
ttv_array = numpy.divide(ttv_array, 1000) # km/s
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print transit_duration
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = 2 * M_gan
firstmoon.px = a_io
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = a_gan
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
# Use the outermost moon to calculate the length of one full orbit duration
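# (added note: this is Kepler's third law, P = 2*pi*sqrt(a**3 / (G*(M + m))),
#  applied to the outer moon's semi-major axis and the planet+moon mass)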
orbit_duration = math.sqrt((4 * math.pi**2 * secondmoon.px ** 3) / (G * (secondmoon.mass + planet.mass)))
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon])
# Output information
print 'TTV amplitude =', numpy.amax(ttv_array), \
'[min] = ', numpy.amax(ttv_array) * 60, '[sec]'
print 'TDV amplitude =', numpy.amax(tdv_array), \
'[min] = ', numpy.amax(tdv_array) * 60, '[sec]'
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparison with eccentric moon
plt.xlim(-0.4, +0.4)
plt.ylim(-1, +1)
plt.savefig("fig_system_viii.eps", bbox_inches = 'tight')
| mit |
amolkahat/pandas | pandas/tests/indexing/test_indexing_slow.py | 2 | 3774 | # -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
from pandas.core.api import Series, DataFrame, MultiIndex
import pandas.util.testing as tm
import pytest
class TestIndexingSlow(object):
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_multiindex_get_loc(self): # GH7724, GH2646
with warnings.catch_warnings(record=True):
# test indexing into a multi-index before & past the lexsort depth
from numpy.random import randint, choice, randn
cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
def validate(mi, df, key):
mask = np.ones(len(df)).astype('bool')
# test for all partials of this key
for i, k in enumerate(key):
mask &= df.iloc[:, i] == k
if not mask.any():
assert key[:i + 1] not in mi.index
continue
assert key[:i + 1] in mi.index
right = df[mask].copy()
if i + 1 != len(key): # partial key
right.drop(cols[:i + 1], axis=1, inplace=True)
right.set_index(cols[i + 1:-1], inplace=True)
tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
else: # full key
right.set_index(cols[:-1], inplace=True)
if len(right) == 1: # single hit
right = Series(right['jolia'].values,
name=right.index[0],
index=['jolia'])
tm.assert_series_equal(mi.loc[key[:i + 1]], right)
else: # multi hit
tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
def loop(mi, df, keys):
for key in keys:
validate(mi, df, key)
n, m = 1000, 50
vals = [randint(0, 10, n), choice(
list('abcdefghij'), n), choice(
pd.date_range('20141009', periods=10).tolist(), n), choice(
list('ZYXWVUTSRQ'), n), randn(n)]
vals = list(map(tuple, zip(*vals)))
# bunch of keys for testing
keys = [randint(0, 11, m), choice(
list('abcdefghijk'), m), choice(
pd.date_range('20141009', periods=11).tolist(), m), choice(
list('ZYXWVUTSRQP'), m)]
keys = list(map(tuple, zip(*keys)))
keys += list(map(lambda t: t[:-1], vals[::n // m]))
# covers both unique index and non-unique index
df = DataFrame(vals, columns=cols)
a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
for frame in a, b:
for i in range(5): # lexsort depth
df = frame.copy() if i == 0 else frame.sort_values(
by=cols[:i])
mi = df.set_index(cols[:-1])
assert not mi.index.lexsort_depth < i
loop(mi, df, keys)
@pytest.mark.slow
def test_large_dataframe_indexing(self):
# GH10692
result = DataFrame({'x': range(10 ** 6)}, dtype='int64')
result.loc[len(result)] = len(result) + 1
expected = DataFrame({'x': range(10 ** 6 + 1)}, dtype='int64')
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_large_mi_dataframe_indexing(self):
# GH10645
result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
assert (not (10 ** 6, 0) in result)
| bsd-3-clause |
saurabhjn76/sympy | sympy/interactive/printing.py | 31 | 15830 | """Tools for setting up printing in interactive sessions. """
from __future__ import print_function, division
import sys
from distutils.version import LooseVersion as V
from io import BytesIO
from sympy import latex as default_latex
from sympy import preview
from sympy.core.compatibility import integer_types
from sympy.utilities.misc import debug
def _init_python_printing(stringify_func):
"""Setup printing in Python interactive session. """
import sys
from sympy.core.compatibility import builtins
def _displayhook(arg):
"""Python's pretty-printer display hook.
This function was adapted from:
http://www.python.org/dev/peps/pep-0217/
"""
if arg is not None:
builtins._ = None
print(stringify_func(arg))
builtins._ = arg
sys.displayhook = _displayhook
def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor,
backcolor, fontsize, latex_mode, print_builtin,
latex_printer):
"""Setup printing in IPython interactive session. """
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
pass
preamble = "\\documentclass[%s]{article}\n" \
"\\pagestyle{empty}\n" \
"\\usepackage{amsmath,amsfonts}%s\\begin{document}"
if euler:
addpackages = '\\usepackage{euler}'
else:
addpackages = ''
preamble = preamble % (fontsize, addpackages)
imagesize = 'tight'
offset = "0cm,0cm"
resolution = 150
dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % (
imagesize, resolution, backcolor, forecolor, offset)
dvioptions = dvi.split()
debug("init_printing: DVIOPTIONS:", dvioptions)
debug("init_printing: PREAMBLE:", preamble)
latex = latex_printer or default_latex
def _print_plain(arg, p, cycle):
"""caller for pretty, for use in IPython 0.11"""
if _can_print_latex(arg):
p.text(stringify_func(arg))
else:
p.text(IPython.lib.pretty.pretty(arg))
def _preview_wrapper(o):
exprbuffer = BytesIO()
try:
preview(o, output='png', viewer='BytesIO',
outputbuffer=exprbuffer, preamble=preamble,
dvioptions=dvioptions)
except Exception as e:
# IPython swallows exceptions
debug("png printing:", "_preview_wrapper exception raised:",
repr(e))
raise
return exprbuffer.getvalue()
def _matplotlib_wrapper(o):
# mathtext does not understand certain latex flags, so we try to
# replace them with suitable subs
o = o.replace(r'\operatorname', '')
o = o.replace(r'\overline', r'\bar')
return latex_to_png(o)
def _can_print_latex(o):
"""Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of
o can be printed with LaTeX.
"""
from sympy import Basic
from sympy.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
if isinstance(o, (list, tuple, set, frozenset)):
return all(_can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all(_can_print_latex(i) and _can_print_latex(o[i]) for i in o)
elif isinstance(o, bool):
return False
# TODO : Investigate if "elif hasattr(o, '_latex')" is more useful
# to use here, than these explicit imports.
elif isinstance(o, (Basic, MatrixBase, Vector, Dyadic)):
return True
elif isinstance(o, (float, integer_types)) and print_builtin:
return True
return False
def _print_latex_png(o):
"""
A function that returns a png rendered by an external latex
distribution, falling back to matplotlib rendering
"""
if _can_print_latex(o):
s = latex(o, mode=latex_mode)
try:
return _preview_wrapper(s)
except RuntimeError:
if latex_mode != 'inline':
s = latex(o, mode='inline')
return _matplotlib_wrapper(s)
def _print_latex_matplotlib(o):
"""
A function that returns a png rendered by mathtext
"""
if _can_print_latex(o):
s = latex(o, mode='inline')
try:
return _matplotlib_wrapper(s)
except Exception:
# Matplotlib.mathtext cannot render some things (like
# matrices)
return None
def _print_latex_text(o):
"""
A function to generate the latex representation of sympy expressions.
"""
if _can_print_latex(o):
s = latex(o, mode='plain')
s = s.replace(r'\dag', r'\dagger')
s = s.strip('$')
return '$$%s$$' % s
def _result_display(self, arg):
"""IPython's pretty-printer display hook, for use in IPython 0.10
This function was adapted from:
ipython/IPython/hooks.py:155
"""
if self.rc.pprint:
out = stringify_func(arg)
if '\n' in out:
print
print(out)
else:
print(repr(arg))
import IPython
if V(IPython.__version__) >= '0.11':
from sympy.core.basic import Basic
from sympy.matrices.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
printable_types = [Basic, MatrixBase, float, tuple, list, set,
frozenset, dict, Vector, Dyadic] + list(integer_types)
plaintext_formatter = ip.display_formatter.formatters['text/plain']
for cls in printable_types:
plaintext_formatter.for_type(cls, _print_plain)
png_formatter = ip.display_formatter.formatters['image/png']
if use_latex in (True, 'png'):
debug("init_printing: using png formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_png)
elif use_latex == 'matplotlib':
debug("init_printing: using matplotlib formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_matplotlib)
else:
debug("init_printing: not using any png formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#png_formatter.for_type(cls, None)
if cls in png_formatter.type_printers:
png_formatter.type_printers.pop(cls)
latex_formatter = ip.display_formatter.formatters['text/latex']
if use_latex in (True, 'mathjax'):
debug("init_printing: using mathjax formatter")
for cls in printable_types:
latex_formatter.for_type(cls, _print_latex_text)
else:
debug("init_printing: not using text/latex formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#latex_formatter.for_type(cls, None)
if cls in latex_formatter.type_printers:
latex_formatter.type_printers.pop(cls)
else:
ip.set_hook('result_display', _result_display)
def _is_ipython(shell):
"""Is a shell instance an IPython shell?"""
# shortcut, so we don't import IPython if we don't have to
if 'IPython' not in sys.modules:
return False
try:
from IPython.core.interactiveshell import InteractiveShell
except ImportError:
# IPython < 0.11
try:
from IPython.iplib import InteractiveShell
except ImportError:
# Reaching this points means IPython has changed in a backward-incompatible way
# that we don't know about. Warn?
return False
return isinstance(shell, InteractiveShell)
def init_printing(pretty_print=True, order=None, use_unicode=None,
use_latex=None, wrap_line=None, num_columns=None,
no_global=False, ip=None, euler=False, forecolor='Black',
backcolor='Transparent', fontsize='10pt',
latex_mode='equation*', print_builtin=True,
str_printer=None, pretty_printer=None,
latex_printer=None):
"""
Initializes pretty-printer depending on the environment.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify or the provided pretty
printer; if False, use sstrrepr to stringify or the provided string
printer.
order: string or None
There are a few different settings for this parameter:
lex (default), which is lexographic order;
grlex, which is graded lexographic order;
grevlex, which is reversed graded lexographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: string, boolean, or None
If True, use default latex rendering in GUI interfaces (png and
mathjax);
if False, do not use latex rendering;
if 'png', enable latex rendering with an external latex compiler,
falling back to matplotlib if external compilation fails;
if 'matplotlib', enable latex rendering with matplotlib;
if 'mathjax', enable latex text generation, for example MathJax
rendering in IPython notebook or text rendering in LaTeX documents
wrap_line: boolean
If True, lines will wrap at the end; if False, they will not wrap
but continue as one line. This is only relevant if `pretty_print` is
True.
num_columns: int or None
If int, number of columns before wrapping is set to num_columns; if
None, number of columns before wrapping is set to terminal width.
This is only relevant if `pretty_print` is True.
no_global: boolean
If True, the settings become system wide;
if False, use just for this console/session.
ip: An interactive console
This can either be an instance of IPython,
or a class that derives from code.InteractiveConsole.
euler: boolean, optional, default=False
Loads the euler package in the LaTeX preamble for handwritten style
fonts (http://www.ctan.org/pkg/euler).
forecolor: string, optional, default='Black'
DVI setting for foreground color.
backcolor: string, optional, default='Transparent'
DVI setting for background color.
fontsize: string, optional, default='10pt'
A font size to pass to the LaTeX documentclass function in the
preamble.
latex_mode: string, optional, default='equation*'
The mode used in the LaTeX printer. Can be one of:
{'inline'|'plain'|'equation'|'equation*'}.
print_builtin: boolean, optional, default=True
If true then floats and integers will be printed. If false the
printer will only print SymPy types.
str_printer: function, optional, default=None
A custom string printer function. This should mimic
sympy.printing.sstrrepr().
pretty_printer: function, optional, default=None
A custom pretty printer. This should mimic sympy.printing.pretty().
latex_printer: function, optional, default=None
A custom LaTeX printer. This should mimic sympy.printing.latex()
This should mimic sympy.printing.latex().
Examples
========
>>> from sympy.interactive import init_printing
>>> from sympy import Symbol, sqrt
>>> from sympy.abc import x, y
>>> sqrt(5)
sqrt(5)
>>> init_printing(pretty_print=True) # doctest: +SKIP
>>> sqrt(5) # doctest: +SKIP
___
\/ 5
>>> theta = Symbol('theta') # doctest: +SKIP
>>> init_printing(use_unicode=True) # doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
>>> init_printing(use_unicode=False) # doctest: +SKIP
>>> theta # doctest: +SKIP
theta
>>> init_printing(order='lex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grlex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grevlex') # doctest: +SKIP
>>> str(y * x**2 + x * y**2) # doctest: +SKIP
x**2*y + x*y**2
>>> init_printing(order='old') # doctest: +SKIP
>>> str(x**2 + y**2 + x + y) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(num_columns=10) # doctest: +SKIP
>>> x**2 + x + y**2 + y # doctest: +SKIP
x + y +
x**2 + y**2
"""
import sys
from sympy.printing.printer import Printer
if pretty_print:
if pretty_printer is not None:
stringify_func = pretty_printer
else:
from sympy.printing import pretty as stringify_func
else:
if str_printer is not None:
stringify_func = str_printer
else:
from sympy.printing import sstrrepr as stringify_func
# Even if ip is not passed, double check that not in IPython shell
in_ipython = False
if ip is None:
try:
ip = get_ipython()
except NameError:
pass
else:
in_ipython = (ip is not None)
if ip and not in_ipython:
in_ipython = _is_ipython(ip)
if in_ipython and pretty_print:
try:
import IPython
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if V(IPython.__version__) >= '1.0':
from IPython.terminal.interactiveshell import TerminalInteractiveShell
else:
from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
from code import InteractiveConsole
except ImportError:
pass
else:
# This will be True if we are in the qtconsole or notebook
if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \
and 'ipython-console' not in ''.join(sys.argv):
if use_unicode is None:
debug("init_printing: Setting use_unicode to True")
use_unicode = True
if use_latex is None:
debug("init_printing: Setting use_latex to True")
use_latex = True
if not no_global:
Printer.set_global_settings(order=order, use_unicode=use_unicode,
wrap_line=wrap_line, num_columns=num_columns)
else:
_stringify_func = stringify_func
if pretty_print:
stringify_func = lambda expr: \
_stringify_func(expr, order=order,
use_unicode=use_unicode,
wrap_line=wrap_line,
num_columns=num_columns)
else:
stringify_func = lambda expr: _stringify_func(expr, order=order)
if in_ipython:
_init_ipython_printing(ip, stringify_func, use_latex, euler,
forecolor, backcolor, fontsize, latex_mode,
print_builtin, latex_printer)
else:
_init_python_printing(stringify_func)
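# Illustrative usage sketch, not part of the original module.  Typical calls
# from an interactive session (the keyword values below are examples only):
#
#     >>> from sympy import init_printing  # doctest: +SKIP
#     >>> init_printing(use_unicode=True, wrap_line=False)  # doctest: +SKIP
#     >>> init_printing(pretty_print=False,
#     ...               str_printer=lambda expr, **settings: "<%s>" % expr)  # doctest: +SKIP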
| bsd-3-clause |
MartinThoma/algorithms | ML/50-mlps/03-autokeras/hasy_tools.py | 12 | 46221 | #!/usr/bin/env python
"""
Tools for the HASY dataset.
Type `./hasy_tools.py --help` for the command line tools and `help(hasy_tools)`
in the interactive Python shell for the module options of hasy_tools.
See https://arxiv.org/abs/1701.08380 for details about the dataset.
"""
import csv
import json
import logging
import os
import random
random.seed(0) # make sure results are reproducible
import hashlib
import sys
import numpy as np
from PIL import Image, ImageDraw
from six.moves import urllib
from sklearn.model_selection import train_test_split
np.random.seed(0) # make sure results are reproducible
import matplotlib.pyplot as plt
import scipy.misc
import scipy.ndimage
try:
from urllib.request import urlretrieve # Python 3
except ImportError:
from urllib import urlretrieve # Python 2
import shutil
import tarfile
from six.moves import cPickle as pickle
from six.moves.urllib.error import HTTPError, URLError
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
__version__ = "v2.4"
n_classes = 369
labels = []
WIDTH = 32
HEIGHT = 32
img_rows = 32
img_cols = 32
img_channels = 1
symbol_id2index = None
def _load_csv(filepath, delimiter=',', quotechar="'"):
"""
Load a CSV file.
Parameters
----------
filepath : str
Path to a CSV file
delimiter : str, optional
quotechar : str, optional
Returns
-------
list of dicts : Each line of the CSV file is one element of the list.
"""
data = []
csv_dir = os.path.dirname(filepath)
with open(filepath) as csvfile:
reader = csv.DictReader(csvfile,
delimiter=delimiter,
quotechar=quotechar)
for row in reader:
for el in ['path', 'path1', 'path2']:
if el in row:
row[el] = os.path.abspath(os.path.join(csv_dir, row[el]))
data.append(row)
return data
def generate_index(csv_filepath):
"""
Generate an index 0...k for the k labels.
Parameters
----------
csv_filepath : str
Path to 'test.csv' or 'train.csv'
Returns
-------
tuple of dict and a list
dict : Maps a symbol_id as in test.csv and
train.csv to an integer in 0...k, where k is the total
number of unique labels.
list : LaTeX labels
"""
symbol_id2index = {}
data = _load_csv(csv_filepath)
i = 0
labels = []
for item in data:
if item['symbol_id'] not in symbol_id2index:
symbol_id2index[item['symbol_id']] = i
labels.append(item['latex'])
i += 1
return symbol_id2index, labels
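# Illustrative usage sketch, not part of the original module.  It assumes the
# HASYv2 archive has been extracted locally; the CSV path is an example only.
def _example_generate_index(csv_filepath='hasy-data-labels.csv'):
    """Build the symbol_id -> dense index mapping and report the class count."""
    symbol_id2index, latex_labels = generate_index(csv_filepath)
    # symbol_id2index maps each symbol_id string to an index in 0..k-1;
    # latex_labels[i] is the LaTeX code of the class with index i.
    print("%i distinct classes" % len(latex_labels))
    return symbol_id2index, latex_labels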
def _validate_file(fpath, md5_hash):
"""
Validate a file against a MD5 hash.
Parameters
----------
fpath: string
Path to the file being validated
md5_hash: string
The MD5 hash being validated against
Returns
---------
bool
True, if the file is valid. Otherwise False.
"""
hasher = hashlib.md5()
with open(fpath, 'rb') as f:
buf = f.read()
hasher.update(buf)
if str(hasher.hexdigest()) == str(md5_hash):
return True
else:
return False
def _get_file(fname, origin, md5_hash=None, cache_subdir='~/.datasets'):
"""
Download a file from a URL if it not already in the cache.
Passing the MD5 hash will verify the file after download
as well as if it is already present in the cache.
Parameters
----------
fname: name of the file
origin: original URL of the file
md5_hash: MD5 hash of the file for verification
cache_subdir: directory being used as the cache
Returns
-------
Path to the downloaded file
"""
datadir_base = os.path.expanduser("~/.datasets")
if not os.path.exists(datadir_base):
os.makedirs(datadir_base)
if not os.access(datadir_base, os.W_OK):
logging.warning(f"Could not access {cache_subdir}.")
datadir_base = os.path.join('/tmp', '.data')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if md5_hash is not None:
if not _validate_file(fpath, md5_hash):
print('A local file was found, but it seems to be '
'incomplete or outdated.')
download = True
else:
download = True
if download:
print(f'Downloading data from {origin} to {fpath}')
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
return fpath
def load_data(mode='fold-1', image_dim_ordering='tf'):
"""
Load HASYv2 dataset.
Parameters
----------
    mode : string, optional (default: "fold-1")
- "complete" : Returns {'x': x, 'y': y} with all labeled data
- "fold-1": Returns {'x_train': x_train,
'y_train': y_train,
'x_test': x_test,
'y_test': y_test}
- "fold-2", ..., "fold-10": See "fold-1"
- "verification": Returns {'train': {'x_train': List of loaded images,
'y_train': list of labels},
'test-v1': {'X1s': List of first images,
'X2s': List of second images,
'ys': List of labels
'True' or 'False'}
'test-v2': {'X1s': List of first images,
'X2s': List of second images,
'ys': List of labels
'True' or 'False'}
'test-v3': {'X1s': List of first images,
'X2s': List of second images,
'ys': List of labels
'True' or 'False'}}
image_dim_ordering : 'th' for theano or 'tf' for tensorflow (default: 'tf')
Returns
-------
dict
See "mode" parameter for details.
All 'x..' keys contain a uint8 numpy array [index, y, x, depth] (or
        [index, depth, y, x] for image_dim_ordering='th')
All 'y..' keys contain a 2D uint8 numpy array [[label]]
"""
# Download if not already done
fname = 'HASYv2.tar.bz2'
origin = 'https://zenodo.org/record/259444/files/HASYv2.tar.bz2'
fpath = _get_file(fname, origin=origin,
md5_hash='fddf23f36e24b5236f6b3a0880c778e3',
cache_subdir='HASYv2')
path = os.path.dirname(fpath)
# Extract content if not already done
untar_fpath = os.path.join(path, "HASYv2")
if not os.path.exists(untar_fpath):
print('Extract contents from archive...')
tfile = tarfile.open(fpath, 'r:bz2')
try:
tfile.extractall(path=untar_fpath)
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(untar_fpath):
if os.path.isfile(untar_fpath):
os.remove(untar_fpath)
else:
shutil.rmtree(untar_fpath)
raise
tfile.close()
# Create pickle if not already done
pickle_fpath = os.path.join(untar_fpath, "hasy-data.pickle")
if not os.path.exists(pickle_fpath):
# Load mapping from symbol names to indices
symbol_csv_fpath = os.path.join(untar_fpath, "symbols.csv")
symbol_id2index, labels = generate_index(symbol_csv_fpath)
globals()["labels"] = labels
globals()["symbol_id2index"] = symbol_id2index
# Load data
data_csv_fpath = os.path.join(untar_fpath, "hasy-data-labels.csv")
data_csv = _load_csv(data_csv_fpath)
x_compl = np.zeros((len(data_csv), 1, WIDTH, HEIGHT), dtype=np.uint8)
y_compl = []
s_compl = []
path2index = {}
# Load HASYv2 data
for i, data_item in enumerate(data_csv):
fname = os.path.join(untar_fpath, data_item['path'])
s_compl.append(fname)
x_compl[i, 0, :, :] = scipy.ndimage.imread(fname,
flatten=False,
mode='L')
label = symbol_id2index[data_item['symbol_id']]
y_compl.append(label)
path2index[fname] = i
y_compl = np.array(y_compl, dtype=np.int64)
data = {'x': x_compl,
'y': y_compl,
's': s_compl,
'labels': labels,
'path2index': path2index}
# Store data as pickle to speed up later calls
with open(pickle_fpath, 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
with open(pickle_fpath, 'rb') as f:
data = pickle.load(f)
globals()["labels"] = data['labels']
labels = data['labels']
x_compl = data['x']
y_compl = np.reshape(data['y'], (len(data['y']), 1))
s_compl = data['s']
path2index = data['path2index']
if image_dim_ordering == 'tf':
x_compl = x_compl.transpose(0, 2, 3, 1)
if mode == 'complete':
return {'x': x_compl, 'y': y_compl}
elif mode.startswith('fold-'):
fold = int(mode.split("-")[1])
if fold < 1 or fold > 10:
raise NotImplementedError
# Load fold
fold_dir = os.path.join(untar_fpath,
f"classification-task/fold-{fold}")
train_csv_fpath = os.path.join(fold_dir, "train.csv")
test_csv_fpath = os.path.join(fold_dir, "test.csv")
train_csv = _load_csv(train_csv_fpath)
test_csv = _load_csv(test_csv_fpath)
train_ids = np.array([path2index[row['path']] for row in train_csv])
test_ids = np.array([path2index[row['path']] for row in test_csv])
x_train = x_compl[train_ids]
x_test = x_compl[test_ids]
y_train = y_compl[train_ids]
y_test = y_compl[test_ids]
s_train = [s_compl[id_] for id_ in train_ids]
s_test = [s_compl[id_] for id_ in test_ids]
data = {'x_train': x_train,
'y_train': y_train,
'x_test': x_test,
'y_test': y_test,
's_train': s_train,
's_test': s_test,
'labels': labels
}
return data
elif mode == 'verification':
# Load the data
symbol_id2index = globals()["symbol_id2index"]
base_ = os.path.join(untar_fpath, "verification-task")
# Load train data
train_csv_fpath = os.path.join(base_, "train.csv")
train_csv = _load_csv(train_csv_fpath)
train_ids = np.array([path2index[row['path']] for row in train_csv])
x_train = x_compl[train_ids]
y_train = y_compl[train_ids]
s_train = [s_compl[id_] for id_ in train_ids]
# Load test data
test1_csv_fpath = os.path.join(base_, 'test-v1.csv')
test2_csv_fpath = os.path.join(base_, 'test-v2.csv')
test3_csv_fpath = os.path.join(base_, 'test-v3.csv')
tmp1 = _load_images_verification_test(test1_csv_fpath,
x_compl,
path2index)
tmp2 = _load_images_verification_test(test2_csv_fpath,
x_compl,
path2index)
tmp3 = _load_images_verification_test(test3_csv_fpath,
x_compl,
path2index)
data = {'train': {'x_train': x_train,
'y_train': y_train,
'source': s_train},
'test-v1': tmp1,
'test-v2': tmp2,
'test-v3': tmp3}
return data
else:
raise NotImplementedError
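# Illustrative usage sketch, not part of the original module: load the first
# classification fold and inspect the array shapes.  The first call downloads
# and extracts HASYv2 into ~/.datasets automatically.
def _example_load_fold():
    fold = load_data(mode='fold-1', image_dim_ordering='tf')
    print(fold['x_train'].shape)  # (n_train, 32, 32, 1) with 'tf' ordering
    print(fold['y_train'].shape)  # (n_train, 1) integer class indices
    print(len(fold['labels']))    # number of classes (369 for HASYv2)
    return fold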
def load_images(csv_filepath, symbol_id2index,
one_hot=True,
flatten=False,
normalize=True,
shuffle=True):
"""
Load the images into a 4D uint8 numpy array [index, y, x, depth].
Parameters
----------
csv_filepath : str
'test.csv' or 'train.csv'
symbol_id2index : dict
Dictionary generated by generate_index
one_hot : bool, optional (default: True)
Make label vector as 1-hot encoding, otherwise index
flatten : bool, optional (default: False)
Flatten feature vector
normalize : bool, optional (default: True)
        Normalize features to the range [0.0, 1.0]
shuffle : bool, optional (default: True)
Shuffle loaded data
Returns
-------
images, labels, source :
Images is a 4D uint8 numpy array [index, y, x, depth]
and labels is a 2D uint8 numpy array [index][1-hot enc]
and source is a list of file paths
"""
WIDTH, HEIGHT = 32, 32
dataset_path = os.path.dirname(csv_filepath)
data = _load_csv(csv_filepath)
if flatten:
images = np.zeros((len(data), WIDTH * HEIGHT))
else:
images = np.zeros((len(data), WIDTH, HEIGHT, 1))
labels, sources = [], []
for i, data_item in enumerate(data):
fname = os.path.join(dataset_path, data_item['path'])
sources.append(fname)
if flatten:
img = scipy.ndimage.imread(fname, flatten=False, mode='L')
images[i, :] = img.flatten()
else:
images[i, :, :, 0] = scipy.ndimage.imread(fname,
flatten=False,
mode='L')
label = symbol_id2index[data_item['symbol_id']]
labels.append(label)
# Make sure the type of images is float32
images = np.array(images, dtype=np.float32)
if normalize:
images /= 255.0
data = [images, np.array(labels), sources]
if shuffle:
perm = np.arange(len(labels))
np.random.shuffle(perm)
data[0] = data[0][perm]
data[1] = data[1][perm]
data[2] = [data[2][index] for index in perm]
if one_hot:
data = (data[0], np.eye(len(symbol_id2index))[data[1]], data[2])
return data
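# Illustrative usage sketch, not part of the original module: wire
# generate_index() and load_images() together for a single fold.  The CSV path
# is an example only and assumes the extracted HASYv2 directory layout.
def _example_load_images(train_csv='classification-task/fold-1/train.csv'):
    symbol_id2index, latex_labels = generate_index(train_csv)
    images, one_hot, sources = load_images(train_csv, symbol_id2index,
                                           one_hot=True, flatten=False)
    # images: float32 array of shape (n, 32, 32, 1), scaled to [0, 1]
    # one_hot: label matrix of shape (n, len(symbol_id2index))
    return images, one_hot, sources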
def _load_images_verification_test(csv_filepath, x_compl, path2index):
"""
Load images from the verification test files.
Parameters
----------
csv_filepath : str
Path to 'test-v1.csv' or 'test-v2.csv' or 'test-v3.csv'
x_compl : numpy array
Complete hasy data
path2index : dict
Map paths to indices of x_compl
Returns
-------
list
[x1s, x2s, labels, sources] where all four are lists of equal length
x1s and x2s contain images,
labels contains either True or False
sources contains strings
"""
test1_csv = _load_csv(csv_filepath)
test1_x1_ids = np.array([path2index[row['path1']]
for row in test1_csv])
test1_x2_ids = np.array([path2index[row['path2']]
for row in test1_csv])
test1_ys = np.array([row['is_same'] == 'True' for row in test1_csv],
dtype=np.float64)
test1_sources = [(row['path1'], row['path2']) for row in test1_csv]
return {'X1s': x_compl[test1_x1_ids],
'X2s': x_compl[test1_x2_ids],
'ys': test1_ys,
'sources': test1_sources}
def _maybe_download(expected_files, work_directory='HASYv2'):
"""
Download the data, unless it is already there.
Parameters
----------
expected_files : list
Each list contains a dict with keys 'filename', 'source', 'md5sum',
where 'filename' denotes the local filename within work_directory,
'source' is an URL where the file can be downloaded and
'md5sum' is the expected MD5 sum of the file
work_directory : str
"""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
for entry in expected_files:
filepath = os.path.join(work_directory, entry['filename'])
logging.info("Search '%s'", filepath)
if not os.path.exists(filepath):
filepath, _ = urllib.request.urlretrieve(entry['source'], filepath)
statinfo = os.stat(filepath)
logging.info('Successfully downloaded %s (%i bytes)'
% (entry['filename'], statinfo.st_size))
with open(filepath, 'rb') as f:
md5sum_actual = hashlib.md5(f.read()).hexdigest()
if md5sum_actual != entry['md5sum']:
logging.error("File '%s' was expected to have md5sum %s, but "
"has '%s'",
entry['filename'],
entry['md5sum'],
md5sum_actual)
else:
with open(filepath, 'rb') as f:
md5sum_actual = hashlib.md5(f.read()).hexdigest()
if md5sum_actual != entry['md5sum']:
logging.error("File '%s' was expected to have md5sum %s, but "
"has '%s'",
entry['filename'],
entry['md5sum'],
md5sum_actual)
def _maybe_extract(tarfile_path, work_directory):
import tarfile
hasy_tools_path = os.path.join(work_directory, "hasy_tools.py")
if not os.path.isfile(hasy_tools_path):
with tarfile.open(tarfile_path, "r:bz2") as tar:
tar.extractall(path=work_directory)
def _get_data(dataset_path):
"""
Download data and extract it, if it is not already in dataset_path.
Parameters
----------
dataset_path : str
"""
filelist = [{'filename': 'HASYv2.tar.bz2',
'source': ('https://zenodo.org/record/259444/files/'
'HASYv2.tar.bz2'),
'md5sum': 'fddf23f36e24b5236f6b3a0880c778e3'}]
_maybe_download(filelist, work_directory=dataset_path)
tar_filepath = os.path.join(dataset_path, filelist[0]['filename'])
_maybe_extract(tar_filepath, dataset_path)
def _is_valid_png(filepath):
"""
Check if the PNG image is valid.
Parameters
----------
filepath : str
Path to a PNG image
Returns
-------
bool : True if the PNG image is valid, otherwise False.
"""
try:
test = Image.open(filepath)
test.close()
return True
except:
return False
def _verify_all(csv_data_path):
"""Verify all PNG files in the training and test directories."""
train_data = _load_csv(csv_data_path)
for data_item in train_data:
if not _is_valid_png(data_item['path']):
logging.info("%s is invalid." % data_item['path'])
logging.info("Checked %i items of %s." %
(len(train_data), csv_data_path))
def create_random_overview(img_src, x_images, y_images):
"""Create a random overview of images."""
# Create canvas
background = Image.new('RGB',
(35 * x_images, 35 * y_images),
(255, 255, 255))
bg_w, bg_h = background.size
# Paste image on canvas
for x in range(x_images):
for y in range(y_images):
path = random.choice(img_src)['path']
img = Image.open(path, 'r')
img_w, img_h = img.size
offset = (35 * x, 35 * y)
background.paste(img, offset)
# Draw lines
draw = ImageDraw.Draw(background)
for y in range(y_images): # horizontal lines
draw.line((0, 35 * y - 2, 35 * x_images, 35 * y - 2), fill=0)
for x in range(x_images): # vertical lines
draw.line((35 * x - 2, 0, 35 * x - 2, 35 * y_images), fill=0)
# Store
background.save('hasy-overview.png')
def _get_colors(data, verbose=False):
"""
Get how often each color is used in data.
Parameters
----------
data : dict
with key 'path' pointing to an image
verbose : bool, optional
Returns
-------
color_count : dict
Maps a grayscale value (0..255) to how often it was in `data`
"""
color_count = {}
for i in range(256):
color_count[i] = 0
for i, data_item in enumerate(data):
if i % 1000 == 0 and i > 0 and verbose:
print("%i of %i done" % (i, len(data)))
fname = os.path.join('.', data_item['path'])
img = scipy.ndimage.imread(fname, flatten=False, mode='L')
for row in img:
for pixel in row:
color_count[pixel] += 1
return color_count
def data_by_class(data):
"""
Organize `data` by class.
Parameters
----------
data : list of dicts
Each dict contains the key `symbol_id` which is the class label.
Returns
-------
dbc : dict
mapping class labels to lists of dicts
"""
dbc = {}
for item in data:
if item['symbol_id'] in dbc:
dbc[item['symbol_id']].append(item)
else:
dbc[item['symbol_id']] = [item]
return dbc
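# Illustrative usage sketch, not part of the original module: group the label
# CSV by class and report how many recordings each class has.
def _example_class_sizes(csv_filepath='hasy-data-labels.csv'):
    grouped = data_by_class(_load_csv(csv_filepath))
    for symbol_id, items in sorted(grouped.items(),
                                   key=lambda kv: len(kv[1]),
                                   reverse=True):
        print("%s: %i recordings" % (symbol_id, len(items)))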
def _get_color_statistics(csv_filepath, verbose=False):
"""
Count how often white / black is in the image.
Parameters
----------
csv_filepath : str
'test.csv' or 'train.csv'
verbose : bool, optional
"""
symbolid2latex = _get_symbolid2latex()
data = _load_csv(csv_filepath)
black_level, classes = [], []
for symbol_id, elements in data_by_class(data).items():
colors = _get_colors(elements)
b = colors[0]
w = colors[255]
black_level.append(float(b) / (b + w))
classes.append(symbol_id)
if verbose:
print("{}:\t{:0.4f}".format(symbol_id, black_level[-1]))
print("Average black level: {:0.2f}%"
.format(np.average(black_level) * 100))
print("Median black level: {:0.2f}%"
.format(np.median(black_level) * 100))
print("Minimum black level: {:0.2f}% (class: {})"
.format(min(black_level),
[symbolid2latex[c]
for bl, c in zip(black_level, classes)
if bl <= min(black_level)]))
print("Maximum black level: {:0.2f}% (class: {})"
.format(max(black_level),
[symbolid2latex[c]
for bl, c in zip(black_level, classes)
if bl >= max(black_level)]))
def _get_symbolid2latex(csv_filepath='symbols.csv'):
"""Return a dict mapping symbol_ids to LaTeX code."""
symbol_data = _load_csv(csv_filepath)
symbolid2latex = {}
for row in symbol_data:
symbolid2latex[row['symbol_id']] = row['latex']
return symbolid2latex
def _analyze_class_distribution(csv_filepath,
max_data,
bin_size):
"""Plot the distribution of training data over graphs."""
symbol_id2index, labels = generate_index(csv_filepath)
index2symbol_id = {}
for index, symbol_id in symbol_id2index.items():
index2symbol_id[symbol_id] = index
data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)
data = {}
for el in y:
if el in data:
data[el] += 1
else:
data[el] = 1
classes = data
images = len(y)
# Create plot
print("Classes: %i" % len(classes))
print("Images: %i" % images)
class_counts = sorted([count for _, count in classes.items()])
print("\tmin: %i" % min(class_counts))
fig = plt.figure()
ax1 = fig.add_subplot(111)
# plt.title('HASY training data distribution')
plt.xlabel('Amount of available testing images')
plt.ylabel('Number of classes')
# Where we want the ticks, in pixel locations
ticks = [int(el) for el in list(np.linspace(0, max_data, 21))]
# What those pixel locations correspond to in data coordinates.
# Also set the float format here
ax1.set_xticks(ticks)
labels = ax1.get_xticklabels()
plt.setp(labels, rotation=30)
min_examples = 0
ax1.hist(class_counts, bins=range(min_examples, max_data + 1, bin_size))
# plt.show()
filename = '{}.pdf'.format('data-dist')
plt.savefig(filename)
logging.info(f"Plot has been saved as {filename}")
symbolid2latex = _get_symbolid2latex()
top10 = sorted(classes.items(), key=lambda n: n[1], reverse=True)[:10]
top10_data = 0
for index, count in top10:
print("\t%s:\t%i" % (symbolid2latex[index2symbol_id[index]], count))
top10_data += count
total_data = sum([count for index, count in classes.items()])
print("Top-10 has %i training data (%0.2f%% of total)" %
(top10_data, float(top10_data) * 100.0 / total_data))
print("%i classes have more than %i data items." %
(sum([1 for _, count in classes.items() if count > max_data]),
max_data))
def _analyze_pca(csv_filepath):
"""
Analyze how much data can be compressed.
Parameters
----------
csv_filepath : str
Path relative to dataset_path to a CSV file which points to images
"""
import itertools as it
from sklearn.decomposition import PCA
symbol_id2index, labels = generate_index(csv_filepath)
data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)
data = data.reshape(data.shape[0], data.shape[1] * data.shape[2])
pca = PCA()
pca.fit(data)
sum_ = 0.0
done_values = [None, None, None]
done_points = [False, False, False]
chck_points = [0.9, 0.95, 0.99]
for counter, el in enumerate(pca.explained_variance_ratio_):
sum_ += el
for check_point, done, i in zip(chck_points, done_points, it.count()):
if not done and sum_ >= check_point:
done_points[i] = counter
done_values[i] = sum_
for components, variance in zip(done_points, done_values):
print("%i components explain %0.2f of the variance" %
(components, variance))
def _get_euclidean_dist(e1, e2):
"""Calculate the euclidean distance between e1 and e2."""
e1 = e1.flatten()
e2 = e2.flatten()
return sum([(el1 - el2)**2 for el1, el2 in zip(e1, e2)])**0.5
def _inner_class_distance(data):
"""Measure the eucliden distances of one class to the mean image."""
distances = []
mean_img = None
for e1 in data:
fname1 = os.path.join('.', e1['path'])
img1 = scipy.ndimage.imread(fname1, flatten=False, mode='L')
        if mean_img is None:
            mean_img = np.array(img1, dtype=float)
else:
mean_img += img1
mean_img = mean_img / float(len(data))
# mean_img = thresholdize(mean_img, 'auto')
scipy.misc.imshow(mean_img)
for e1 in data:
fname1 = os.path.join('.', e1['path'])
img1 = scipy.ndimage.imread(fname1, flatten=False, mode='L')
dist = _get_euclidean_dist(img1, mean_img)
distances.append(dist)
return (distances, mean_img)
def thresholdize(img, threshold=0.5):
"""Create a black-and-white image from a grayscale image."""
img_new = []
if threshold == 'auto':
img_flat = sorted(img.flatten())
threshold_ind = int(0.85 * len(img_flat))
threshold = img_flat[threshold_ind]
for row in img:
bla = []
for col in row:
if col > threshold:
bla.append(1)
else:
bla.append(0)
img_new.append(bla)
return np.array(img_new)
def _analyze_distances(csv_filepath):
"""Analyze the distance between elements of one class and class means."""
symbolid2latex = _get_symbolid2latex()
data = _load_csv(csv_filepath)
data = data_by_class(data)
mean_imgs = []
for class_, data_class in data.items():
latex = symbolid2latex[class_]
d, mean_img = _inner_class_distance(data_class)
# scipy.misc.imshow(mean_img)
print("%s: min=%0.4f, avg=%0.4f, median=%0.4f max=%0.4f" %
(latex, np.min(d), np.average(d), np.median(d), np.max(d)))
distarr = sorted([(label, mean_c, _get_euclidean_dist(mean_c,
mean_img))
for label, mean_c in mean_imgs],
key=lambda n: n[2])
for label, mean_c, d in distarr:
print(f"\t{label}: {d:0.4f}")
mean_imgs.append((latex, mean_img))
def _analyze_variance(csv_filepath):
"""Calculate the variance of each pixel."""
symbol_id2index, labels = generate_index(csv_filepath)
data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)
# Calculate mean
sum_ = np.zeros((32, 32))
for el in data:
el = np.squeeze(el)
sum_ += el
mean_ = sum_ / float(len(data))
scipy.misc.imshow(mean_)
# Calculate variance
centered_ = np.zeros((32, 32))
for el in data:
el = np.squeeze(el)
centered_ += (el - mean_)**2
centered_ = (1. / len(data)) * centered_**0.5
scipy.misc.imshow(centered_)
for row in list(centered_):
row = list(row)
print(" ".join(["%0.1f" % nr for nr in row]))
def _analyze_correlation(csv_filepath):
"""
Analyze and visualize the correlation of features.
Parameters
----------
csv_filepath : str
Path to a CSV file which points to images
"""
import pandas as pd
from matplotlib import cm as cm
from matplotlib import pyplot as plt
symbol_id2index, labels = generate_index(csv_filepath)
data, y, s = load_images(csv_filepath,
symbol_id2index,
one_hot=False,
flatten=True)
df = pd.DataFrame(data=data)
logging.info("Data loaded. Start correlation calculation. Takes 1.5h.")
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Where we want the ticks, in pixel locations
ticks = np.linspace(0, 1024, 17)
# What those pixel locations correspond to in data coordinates.
# Also set the float format here
ax1.set_xticks(ticks)
ax1.set_yticks(ticks)
labels = ax1.get_xticklabels()
plt.setp(labels, rotation=30)
cmap = cm.get_cmap('viridis', 30)
cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
ax1.grid(True)
# Add colorbar, make sure to specify tick locations to match desired
# ticklabels
fig.colorbar(cax, ticks=[-0.15, 0, 0.15, 0.30, 0.45, 0.60, 0.75, 0.90, 1])
filename = '{}.pdf'.format('feature-correlation')
plt.savefig(filename)
def _create_stratified_split(csv_filepath, n_splits):
"""
Create a stratified split for the classification task.
Parameters
----------
csv_filepath : str
Path to a CSV file which points to images
n_splits : int
Number of splits to make
"""
from sklearn.model_selection import StratifiedKFold
data = _load_csv(csv_filepath)
labels = [el['symbol_id'] for el in data]
    skf = StratifiedKFold(n_splits=n_splits)
i = 1
kdirectory = 'classification-task'
if not os.path.exists(kdirectory):
os.makedirs(kdirectory)
    for train_index, test_index in skf.split(np.zeros(len(labels)), labels):
print("Create fold %i" % i)
directory = "%s/fold-%i" % (kdirectory, i)
if not os.path.exists(directory):
os.makedirs(directory)
else:
print("Directory '%s' already exists. Please remove it." %
directory)
i += 1
train = [data[el] for el in train_index]
test_ = [data[el] for el in test_index]
for dataset, name in [(train, 'train'), (test_, 'test')]:
with open(f"{directory}/{name}.csv", 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(('path', 'symbol_id', 'latex', 'user_id'))
for el in dataset:
csv_writer.writerow(("../../%s" % el['path'],
el['symbol_id'],
el['latex'],
el['user_id']))
def _create_pair(r1_data, r2_data):
"""Create a pair for the verification test."""
    symbol_index = random.choice(list(r1_data.keys()))
r1 = random.choice(r1_data[symbol_index])
is_same = random.choice([True, False])
if is_same:
symbol_index2 = symbol_index
r2 = random.choice(r1_data[symbol_index2])
else:
        symbol_index2 = random.choice(list(r2_data.keys()))
        while symbol_index2 == symbol_index:
            symbol_index2 = random.choice(list(r2_data.keys()))
r2 = random.choice(r2_data[symbol_index2])
return (r1['path'], r2['path'], is_same)
def _create_verification_task(sample_size=32, test_size=0.05):
"""
Create the datasets for the verification task.
Parameters
----------
sample_size : int
Number of classes which will be taken completely
test_size : float in (0, 1)
Percentage of the remaining data to be taken to test
"""
# Get the data
data = _load_csv('hasy-data-labels.csv')
for el in data:
el['path'] = "../hasy-data/" + el['path'].split("hasy-data/")[1]
data = sorted(data_by_class(data).items(),
key=lambda n: len(n[1]),
reverse=True)
symbolid2latex = _get_symbolid2latex()
# Get complete classes
symbols = random.sample(range(len(data)), k=sample_size)
symbols = sorted(symbols, reverse=True)
test_data_excluded = []
for symbol_index in symbols:
# for class_label, items in data:
class_label, items = data.pop(symbol_index)
test_data_excluded += items
print(symbolid2latex[class_label])
# Get data from remaining classes
data_n = []
for class_label, items in data:
data_n = data_n + items
ys = [el['symbol_id'] for el in data_n]
x_train, x_test, y_train, y_test = train_test_split(data_n,
ys,
test_size=test_size)
# Write the training / test data
print("Test data (excluded symbols) = %i" % len(test_data_excluded))
print("Test data (included symbols) = %i" % len(x_test))
print("Test data (total) = %i" % (len(x_test) + len(test_data_excluded)))
kdirectory = 'verification-task'
if not os.path.exists(kdirectory):
os.makedirs(kdirectory)
with open("%s/train.csv" % kdirectory, 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(('path', 'symbol_id', 'latex', 'user_id'))
for el in x_train:
csv_writer.writerow((el['path'],
el['symbol_id'],
el['latex'],
el['user_id']))
x_test_inc_class = data_by_class(x_test)
x_text_exc_class = data_by_class(test_data_excluded)
# V1: Both symbols belong to the training set (included symbols)
with open("%s/test-v1.csv" % kdirectory, 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(('path1', 'path2', 'is_same'))
for i in range(100000):
test_data_tuple = _create_pair(x_test_inc_class, x_test_inc_class)
csv_writer.writerow(test_data_tuple)
# V2: r1 belongs to a symbol in the training set, but r2 might not
with open("%s/test-v2.csv" % kdirectory, 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(('path1', 'path2', 'is_same'))
for i in range(100000):
test_data_tuple = _create_pair(x_test_inc_class, x_text_exc_class)
csv_writer.writerow(test_data_tuple)
# V3: r1 and r2 both don't belong to symbols in the training set
with open("%s/test-v3.csv" % kdirectory, 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(('path1', 'path2', 'is_same'))
for i in range(100000):
test_data_tuple = _create_pair(x_text_exc_class, x_text_exc_class)
csv_writer.writerow(test_data_tuple)
def _count_users(csv_filepath):
"""
Count the number of users who contributed to the dataset.
Parameters
----------
csv_filepath : str
Path to a CSV file which points to images
"""
data = _load_csv(csv_filepath)
user_ids = {}
for el in data:
if el['user_id'] not in user_ids:
user_ids[el['user_id']] = [el['path']]
else:
user_ids[el['user_id']].append(el['path'])
max_els = 0
max_user = 0
for user_id, elements in user_ids.items():
if len(elements) > max_els:
max_els = len(elements)
max_user = user_id
print("Dataset has %i users." % len(user_ids))
print("User %s created most (%i elements, %0.2f%%)" %
(max_user, max_els, float(max_els) / len(data) * 100.0))
def _analyze_cm(cm_file, total_symbols=100):
"""
Analyze a confusion matrix.
Parameters
----------
cm_file : str
Path to a confusion matrix in JSON format.
Each line contains a list of non-negative integers.
cm[i][j] indicates how often members of class i were labeled with j
"""
symbolid2latex = _get_symbolid2latex()
symbol_id2index, labels = generate_index('hasy-data-labels.csv')
index2symbol_id = {}
for index, symbol_id in symbol_id2index.items():
index2symbol_id[symbol_id] = index
# Load CM
with open(cm_file) as data_file:
cm = json.load(data_file)
class_accuracy = []
n = len(cm)
test_samples_sum = np.sum(cm)
# Number of recordings for symbols which don't have a single correct
# prediction
sum_difficult_none = 0
# Number of recordings for symbols which have an accuracy of less than 5%
sum_difficult_five = 0
for i in range(n):
total = sum([cm[i][j] for j in range(n)])
class_accuracy.append({'class_index': i,
'class_accuracy': float(cm[i][i]) / total,
'class_confusion_index': np.argmax(cm[i]),
'correct_total': cm[i][i],
'class_total': total})
print("Lowest class accuracies:")
class_accuracy = sorted(class_accuracy, key=lambda n: n['class_accuracy'])
index2latex = lambda n: symbolid2latex[index2symbol_id[n]]
for i in range(total_symbols):
if class_accuracy[i]['correct_total'] == 0:
sum_difficult_none += class_accuracy[i]['class_total']
if class_accuracy[i]['class_accuracy'] < 0.05:
sum_difficult_five += class_accuracy[i]['class_total']
latex_orig = index2latex(class_accuracy[i]['class_index'])
latex_conf = index2latex(class_accuracy[i]['class_confusion_index'])
# print("\t%i. \t%s:\t%0.4f (%s); correct=%i" %
# (i + 1,
# latex_orig,
# class_accuracy[i]['class_accuracy'],
# latex_conf,
# class_accuracy[i]['correct_total']))
print(("\t\\verb+{:<15}+ & ${:<15}$ & {:<15} & \\verb+{:<15}+ "
"& ${:<15}$ \\\\ ({})").format
(latex_orig, latex_orig,
class_accuracy[i]['class_total'],
latex_conf, latex_conf,
class_accuracy[i]['correct_total']))
print("Non-correct: %0.4f%%" %
(sum_difficult_none / float(test_samples_sum)))
print("five-correct: %0.4f%%" %
(sum_difficult_five / float(test_samples_sum)))
print("Easy classes")
class_accuracy = sorted(class_accuracy,
key=lambda n: n['class_accuracy'],
reverse=True)
for i in range(total_symbols):
latex_orig = index2latex(class_accuracy[i]['class_index'])
latex_conf = index2latex(class_accuracy[i]['class_confusion_index'])
if class_accuracy[i]['class_accuracy'] < 0.99:
break
# print("\t%i. \t%s:\t%0.4f (%s); correct=%i" %
# (i + 1,
# latex_orig,
# class_accuracy[i]['class_accuracy'],
# latex_conf,
# class_accuracy[i]['correct_total']))
print(("\t\\verb+{:<15}+ & ${:<15}$ & {:<15} & "
"\\verb+{:<15}+ & ${:<15}$ \\\\ ({})").format
(latex_orig, latex_orig,
class_accuracy[i]['class_total'],
latex_conf, latex_conf,
class_accuracy[i]['correct_total']))
# cm = np.array(cm)
# scipy.misc.imshow(cm)
def preprocess(x):
"""Preprocess features."""
x = x.astype('float32')
x /= 255.0
return x
def _get_parser():
"""Get parser object for hasy_tools.py."""
import argparse
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--dataset",
dest="dataset",
help="specify which data to use")
parser.add_argument("--verify",
dest="verify",
action="store_true",
default=False,
help="verify PNG files")
parser.add_argument("--overview",
dest="overview",
action="store_true",
default=False,
help="Get overview of data")
parser.add_argument("--analyze_color",
dest="analyze_color",
action="store_true",
default=False,
help="Analyze the color distribution")
parser.add_argument("--class_distribution",
dest="class_distribution",
action="store_true",
default=False,
help="Analyze the class distribution")
parser.add_argument("--distances",
dest="distances",
action="store_true",
default=False,
help="Analyze the euclidean distance distribution")
parser.add_argument("--pca",
dest="pca",
action="store_true",
default=False,
help=("Show how many principal components explain "
"90%% / 95%% / 99%% of the variance"))
parser.add_argument("--variance",
dest="variance",
action="store_true",
default=False,
help="Analyze the variance of features")
parser.add_argument("--correlation",
dest="correlation",
action="store_true",
default=False,
help="Analyze the correlation of features")
parser.add_argument("--create-classification-task",
dest="create_folds",
action="store_true",
default=False,
help=argparse.SUPPRESS)
parser.add_argument("--create-verification-task",
dest="create_verification_task",
action="store_true",
default=False,
help=argparse.SUPPRESS)
parser.add_argument("--count-users",
dest="count_users",
action="store_true",
default=False,
help="Count how many different users have created "
"the dataset")
parser.add_argument("--analyze-cm",
dest="cm",
default=False,
help="Analyze a confusion matrix in JSON format.")
return parser
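# Example invocations (illustrative only; the paths refer to the extracted
# HASYv2 data and must be adjusted to your local copy):
#
#   ./hasy_tools.py --dataset hasy-data-labels.csv --overview
#   ./hasy_tools.py --dataset classification-task/fold-1/train.csv --class_distribution
#   ./hasy_tools.py --analyze-cm confusion-matrix.json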
if __name__ == "__main__":
args = _get_parser().parse_args()
if args.verify:
if args.dataset is None:
logging.error("--dataset needs to be set for --verify")
sys.exit()
_verify_all(args.dataset)
if args.overview:
img_src = _load_csv(args.dataset)
create_random_overview(img_src, x_images=10, y_images=10)
if args.analyze_color:
_get_color_statistics(csv_filepath=args.dataset)
if args.class_distribution:
_analyze_class_distribution(csv_filepath=args.dataset,
max_data=1000,
bin_size=25)
if args.pca:
_analyze_pca(csv_filepath=args.dataset)
if args.distances:
_analyze_distances(csv_filepath=args.dataset)
if args.variance:
_analyze_variance(csv_filepath=args.dataset)
if args.correlation:
_analyze_correlation(csv_filepath=args.dataset)
if args.create_folds:
_create_stratified_split(args.dataset, int(args.create_folds))
if args.count_users:
_count_users(csv_filepath=args.dataset)
if args.create_verification_task:
_create_verification_task()
if args.cm:
_analyze_cm(args.cm)
| mit |
IRC-SPHERE/dictionary-learning | spams_transformer.py | 1 | 10532 | # The MIT License (MIT)
# Copyright (c) 2014-2017 University of Bristol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
import spams
class SpamsTransformer(BaseEstimator, TransformerMixin):
"""
This class performs dictionary learning on data using the SPArse Modeling Software (SPAMS).
"""
def __init__(self, total_num_bases, l1_dictionary, l1_reconstruct, class_conditional=False,
positive_coefficients=True, l0_max=0, verbose=0, num_iterations=100,
minibatch_size=512, use_lasso=True):
"""
:param total_num_bases: The total number of atoms to be learnt by the dictionary learning software. No default
value.
:param l1_dictionary: Regularisation parameter (1-norm) for learning the dictionary. No default value.
:param l1_reconstruct: Regularisation parameter (1-norm) for reconstruction. No default value.
:param class_conditional: Whether to partition the data by class, and to learn atoms on a per-class basis.
Default: False
Note: the number of atoms per class will be @total_num_bases / num_classes.
:param positive_coefficients: Constrains the coefficients to be positive in reconstruction. Default: True
:param l0_max: When using OMP, this parameter sets the maximal number of coefficients to be used
Note: (0 \leq @l0_max \leq total_num_bases). Default: 0
:param verbose: Verbosity level. Default: 0.
:param num_iterations: Number of iterations for training. Default: 100.
:param minibatch_size: The sample size for each minibatch. Default: 512.
:param use_lasso: When `True` uses LASSO during optimisation, when `False` uses OMP.
"""
self.total_num_bases = total_num_bases
self.l1_dictionary = l1_dictionary
self.l1_reconstruct = l1_reconstruct
self.num_iterations = num_iterations
self.minibatch_size = minibatch_size
self.use_lasso = use_lasso
self.l0_max = l0_max
self.verbose = verbose
self.class_conditional = class_conditional
self.positive_coefficients = positive_coefficients
self.dictionary = None
def fit(self, x, y=None):
"""
Fits the dictionary learning model.
        :param x: The input data, $x \in \mathbb{R}^{N \times M}$. Here $N$ is the number of instances, and $M$ is the
dimensionality of each instance.
:param y: The labels that are associated with the data. Only used when `self.class_conditional = True`.
:return: self
"""
def fit(x, num):
return spams.trainDL(
K=num,
numThreads=2,
X=np.asfortranarray(x.T),
mode=[4, 2][self.use_lasso],
lambda1=[self.l0_max, self.l1_dictionary][self.use_lasso],
iter=self.num_iterations,
verbose=self.verbose,
posAlpha=self.positive_coefficients,
batchsize=self.minibatch_size,
)
if self.class_conditional:
unique_labels = np.unique(y)
num_bases = self.total_num_bases / len(unique_labels)
self.dictionary = np.column_stack(
[fit(x[y == yy], num_bases) for yy in unique_labels]
)
else:
self.dictionary = fit(x, self.total_num_bases)
return self
def transform(self, X, mask=None):
"""
Transforms data X to coefficients.
        :param X: The input data, $X \in \mathbb{R}^{N \times M}$. Here $N$ is the number of instances, and $M$ is the
dimensionality of each instance.
:param mask: Allows missing data to be present in `X`. `mask` should be a binary matrix of the same shape as
`X`. An element that evaluates to `True` indicates that data is present, and `False` means that data
is missing. Set `mask = None` (default value) to do un-masked transformations.
:return: Returns a sparse matrix
"""
if self.use_lasso:
return self._transform_lasso(X, mask)
return self._transform_omp(X, mask)
def inverse_transform(self, alphas, y=None):
"""
Reconstructs input data based on their coefficients.
:param alphas: Sparse coefficient matrix, eg, as returned from the `self.transform` method
:param y: Unused in every case
:return: Reconstructed matrix.
"""
acc_hat = alphas.dot(self.dictionary.T)
return np.asarray(acc_hat, dtype=np.float)
def lasso_params(self, X):
"""
Builds the parameters for the LASSO dictionary learning
:param X: Input data. See `.fit` for more information.
:return: Dictionary containing relevant parameters for LASSO optimisation
"""
return dict(
X=np.asfortranarray(X.T),
D=np.asfortranarray(self.dictionary),
lambda1=self.l1_reconstruct,
numThreads=2,
pos=self.positive_coefficients
)
def omp_params(self, X):
"""
Builds a parameter dictionary for OMP dictionary learning
:param X: Input data See `.fit` for more information.
:return: Dictionary containing relevant parameters for OMP optimisation.
"""
return dict(
X=np.asfortranarray(X.T),
D=np.asfortranarray(self.dictionary),
lambda1=self.l1_reconstruct,
numThreads=2,
L=self.l0_max
)
def _transform_lasso(self, X, mask):
"""
Performs LASSO transformation
:param X: Input data. See `.fit` for more information.
:param mask: Mask on input data. See `.fit` for more information.
:return: Reconstruction parameters
"""
if mask is None:
return spams.lasso(**self.lasso_params(X)).T
return spams.lassoMask(B=np.asfortranarray(mask.T), **self.lasso_params(X)).T
def _transform_omp(self, X, mask):
"""
Performs the OMP transformation.
:param X: Input data. See `.fit` for more information.
:param mask: Mask on input data. See `.fit` for more information.
:return: Reconstruction parameters
"""
if mask is None:
return spams.omp(**self.omp_params(X)).T
return spams.ompMask(
X=np.asfortranarray(X.T),
D=np.asfortranarray(self.dictionary),
B=np.asfortranarray(mask.T),
L=self.l0_max,
lambda1=self.l1_reconstruct,
numThreads=2,
).T
@staticmethod
def save(model, file_name):
"""
Serialise model to file.
:param model: Model
:param file_name: Filename
:return:
"""
import cPickle as pickle
import gzip
with gzip.open(file_name, 'wb') as fil:
pickle.dump(model, fil, protocol=0)
@staticmethod
def load(file_name):
"""
Deserialise model from file
:param file_name: Filename
:return:
"""
import cPickle as pickle
import gzip
        with gzip.open(file_name, 'rb') as fil:
return pickle.load(fil)
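# Illustrative sketch, not part of the original module: reconstruct data that
# contains missing entries by passing a binary mask to transform().  The mask
# construction and the 10% missing rate are examples only; `dl` must be an
# already fitted SpamsTransformer and `x` an (N, D) numpy array.
def _example_masked_reconstruction(dl, x, missing_rate=0.1, seed=0):
    rng = np.random.RandomState(seed)
    mask = rng.uniform(size=x.shape) > missing_rate  # True where data is present
    alphas = dl.transform(x * mask, mask=mask)       # coefficients from observed entries only
    return dl.inverse_transform(alphas)              # dense reconstruction, shape (N, D)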
def main():
import matplotlib.pyplot as pl
import seaborn as sns
sns.set_style('darkgrid')
sns.set_context('poster')
rng = np.random.RandomState(123)
D = 21
N = 500
K = 100
# Generate the data
t = np.linspace(-np.pi * 2, np.pi * 2, D)
y = np.asarray([rng.choice(2, 1) for _ in xrange(N)])
x = np.asarray(
[np.sin(t * (1.0 + y[n]) + rng.normal(0, 0.125, size=D)) for n in xrange(N)]
)
# Fit the model
dl = SpamsTransformer(
total_num_bases=K,
l1_dictionary=1.2 / np.sqrt(D),
l1_reconstruct=1.2 / np.sqrt(D),
num_iterations=100
)
dl.fit(x)
# Plot the data and the learnt bases
fig, axes = pl.subplots(2, 1, sharex=True)
axes[0].plot(t, x.T)
axes[0].set_ylabel('Original data')
axes[1].plot(t, dl.dictionary)
axes[1].set_ylabel('Learnt dictionary')
# Reconstruct the data and plot the first datapoint and its reconstruction
alphas = dl.transform(x) # Compute the reconstruction coefficients
x_hat = dl.inverse_transform(alphas) # Reconstruct the original data
print type(alphas)
fig, axes = pl.subplots(2, 1, sharex=False, sharey=False)
abs_diff = np.abs(x - x_hat)
axes[0].plot(t, x[0], label='Original data')
axes[0].plot(t, x_hat[0], label='Reconstruction (MAE: {:.3f})'.format(
np.mean(abs_diff)
))
pl.legend()
axes[1].hist(abs_diff.ravel(), bins=np.linspace(abs_diff.min(), abs_diff.max(), 31))
print 'Average number of reconstruction coefficients: {}'.format(
alphas.nnz / float(N)
)
pl.show()
if __name__ == '__main__':
main()
| mit |
ywcui1990/htmresearch | projects/sequence_prediction/discrete_sequences/plot.py | 8 | 7504 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import json
from matplotlib import pyplot
import numpy
from expsuite import PyExperimentSuite
def readExperiment(experiment):
with open(experiment, "r") as file:
predictions = []
predictionsSDR = []
truths = []
iterations = []
resets = []
randoms = []
trains = []
killCell = []
sequenceCounter = []
counter = 0
for line in file.readlines():
dataRec = json.loads(line)
if 'iteration' in dataRec.keys():
iterations.append(dataRec['iteration'])
else:
iterations.append(counter)
if 'predictions' in dataRec.keys():
predictions.append(dataRec['predictions'])
else:
predictions.append(None)
if 'predictionsSDR' in dataRec.keys():
predictionsSDR.append(dataRec['predictionsSDR'])
else:
predictionsSDR.append(None)
if 'truth' in dataRec.keys():
truths.append(dataRec['truth'])
else:
truths.append(None)
if 'reset' in dataRec.keys():
resets.append(dataRec['reset'])
else:
resets.append(None)
if 'random' in dataRec.keys():
randoms.append(dataRec['random'])
else:
randoms.append(None)
if 'train' in dataRec.keys():
trains.append(dataRec['train'])
else:
trains.append(None)
if 'killCell' in dataRec.keys():
killCell.append(dataRec['killCell'])
else:
killCell.append(None)
if 'sequenceCounter' in dataRec.keys():
sequenceCounter.append(dataRec['sequenceCounter'])
else:
sequenceCounter.append(None)
counter += 1
return {'predictions': predictions,
'predictionsSDR': predictionsSDR,
'truths': truths,
'iterations': iterations,
'resets': resets,
'randoms': randoms,
'trains': trains,
'killCell': killCell,
'sequenceCounter': sequenceCounter}
def movingAverage(a, n):
movingAverage = []
for i in xrange(len(a)):
start = max(0, i - n)
values = a[start:i+1]
movingAverage.append(sum(values) / float(len(values)))
return movingAverage
def plotMovingAverage(data, window, label=None):
movingData = movingAverage(data, min(len(data), window))
style = 'ro' if len(data) < window else ''
pyplot.plot(range(len(movingData)), movingData, style, label=label)
def plotAccuracy(results, train=None, window=100, type="sequences", label=None, hideTraining=True, lineSize=None):
pyplot.title("High-order prediction")
pyplot.xlabel("# of sequences seen")
pyplot.ylabel("High-order prediction accuracy over last {0} tested {1}".format(window, type))
accuracy = results[0]
x = results[1]
movingData = movingAverage(accuracy, min(len(accuracy), window))
pyplot.plot(x, movingData, label=label, linewidth=lineSize)
# dX = numpy.array([x[i+1] - x[i] for i in xrange(len(x) - 1)])
# testEnd = numpy.array(x)[dX > dX.mean()].tolist()
# testEnd = testEnd + [x[-1]]
# dX = numpy.insert(dX, 0, 0)
# testStart = numpy.array(x)[dX > dX.mean()].tolist()
# testStart = [0] + testStart
# for line in testStart:
# pyplot.axvline(line, color='orange')
# for i in xrange(len(testStart)):
# pyplot.axvspan(testStart[i], testEnd[i], alpha=0.15, facecolor='black')
if not hideTraining:
for i in xrange(len(train)):
if train[i]:
pyplot.axvline(i, color='orange')
pyplot.xlim(0, x[-1])
pyplot.ylim(0, 1.1)
def computeAccuracy(predictions, truths, iterations,
resets=None, randoms=None, num=None,
sequenceCounter=None):
accuracy = []
x = []
for i in xrange(len(predictions) - 1):
if num is not None and i > num:
continue
if truths[i] is None:
continue
# identify the end of sequence
if resets is not None or randoms is not None:
if not (resets[i+1] or randoms[i+1]):
continue
correct = truths[i] is None or truths[i] in predictions[i]
accuracy.append(correct)
if sequenceCounter is not None:
x.append(sequenceCounter[i])
else:
x.append(iterations[i])
return (accuracy, x)
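# Illustrative sketch, not part of the original script: plot the accuracy of a
# single experiment log written in the JSON-per-line format that
# readExperiment() expects.  The file name is an example only.
def exampleUsage(experiment='experiment.log', window=100):
  data = readExperiment(experiment)
  results = computeAccuracy(data['predictions'], data['truths'],
                            data['iterations'], resets=data['resets'],
                            randoms=data['randoms'])
  plotAccuracy(results, data['trains'], window=window)
  pyplot.show()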
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('experiments', metavar='/path/to/experiment /path/...', nargs='+', type=str)
parser.add_argument('-w', '--window', type=int, default=100)
parser.add_argument('-n', '--num', type=int, default=None)
parser.add_argument('-t', '--training-hide', type=int, nargs='+')
parser.add_argument('-g', '--graph-labels', type=str, nargs='+')
parser.add_argument('-s', '--size-of-line', type=float, nargs='+')
parser.add_argument('-l', '--legend-position', type=int, default=4)
parser.add_argument('-f', '--full', action='store_true')
parser.add_argument('-o', '--output', type=str, default=None)
suite = PyExperimentSuite()
args = parser.parse_args()
from pylab import rcParams
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
rcParams.update({'ytick.labelsize': 8})
rcParams.update({'figure.figsize': (12, 6)})
rcParams.update({'pdf.fonttype': 42})
experiments = args.experiments
for i, experiment in enumerate(experiments):
iteration = suite.get_history(experiment, 0, 'iteration')
predictions = suite.get_history(experiment, 0, 'predictions')
truth = suite.get_history(experiment, 0, 'truth')
train = suite.get_history(experiment, 0, 'train')
resets = None if args.full else suite.get_history(experiment, 0, 'reset')
randoms = None if args.full else suite.get_history(experiment, 0, 'random')
type = "elements" if args.full else "sequences"
hideTraining = args.training_hide is not None and len(args.training_hide) > i and args.training_hide[i] > 0
lineSize = args.size_of_line[i] if args.size_of_line is not None and len(args.size_of_line) > i else 0.8
label = args.graph_labels[i] if args.graph_labels is not None and len(args.graph_labels) > i else experiment
plotAccuracy(computeAccuracy(predictions, truth, iteration, resets=resets, randoms=randoms, num=args.num),
train,
window=args.window,
type=type,
label=label,
hideTraining=hideTraining,
lineSize=lineSize)
if len(experiments) > 1:
pyplot.legend(loc=args.legend_position)
if args.output is not None:
pyplot.savefig(args.output)
else:
pyplot.show()
| agpl-3.0 |
wilsonkichoi/zipline | setup.py | 1 | 9878 | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import sys
from operator import lt, gt, eq, le, ge
from os.path import (
abspath,
dirname,
join,
)
from distutils.version import StrictVersion
from setuptools import (
Extension,
find_packages,
setup,
)
import versioneer
class LazyBuildExtCommandClass(dict):
"""
Lazy command class that defers operations requiring Cython and numpy until
they've actually been downloaded and installed by setup_requires.
"""
def __contains__(self, key):
return (
key == 'build_ext'
or super(LazyBuildExtCommandClass, self).__contains__(key)
)
def __setitem__(self, key, value):
if key == 'build_ext':
raise AssertionError("build_ext overridden!")
super(LazyBuildExtCommandClass, self).__setitem__(key, value)
def __getitem__(self, key):
if key != 'build_ext':
return super(LazyBuildExtCommandClass, self).__getitem__(key)
from Cython.Distutils import build_ext as cython_build_ext
import numpy
# Cython_build_ext isn't a new-style class in Py2.
class build_ext(cython_build_ext, object):
"""
Custom build_ext command that lazily adds numpy's include_dir to
extensions.
"""
def build_extensions(self):
"""
Lazily append numpy's include directory to Extension includes.
This is done here rather than at module scope because setup.py
may be run before numpy has been installed, in which case
importing numpy and calling `numpy.get_include()` will fail.
"""
numpy_incl = numpy.get_include()
for ext in self.extensions:
ext.include_dirs.append(numpy_incl)
super(build_ext, self).build_extensions()
return build_ext
ext_modules = [
Extension('zipline.assets._assets', ['zipline/assets/_assets.pyx']),
Extension('zipline.lib.adjustment', ['zipline/lib/adjustment.pyx']),
Extension('zipline.lib._factorize', ['zipline/lib/_factorize.pyx']),
Extension(
'zipline.lib._float64window', ['zipline/lib/_float64window.pyx']
),
Extension('zipline.lib._int64window', ['zipline/lib/_int64window.pyx']),
Extension('zipline.lib._uint8window', ['zipline/lib/_uint8window.pyx']),
Extension('zipline.lib._labelwindow', ['zipline/lib/_labelwindow.pyx']),
Extension('zipline.lib.rank', ['zipline/lib/rank.pyx']),
Extension('zipline.data._equities', ['zipline/data/_equities.pyx']),
Extension('zipline.data._adjustments', ['zipline/data/_adjustments.pyx']),
Extension('zipline._protocol', ['zipline/_protocol.pyx']),
Extension('zipline.gens.sim_engine', ['zipline/gens/sim_engine.pyx']),
Extension(
'zipline.data._minute_bar_internal',
['zipline/data/_minute_bar_internal.pyx']
)
]
STR_TO_CMP = {
'<': lt,
'<=': le,
'=': eq,
'==': eq,
'>': gt,
'>=': ge,
}
SYS_VERSION = '.'.join(list(map(str, sys.version_info[:3])))
def _filter_requirements(lines_iter, filter_names=None,
filter_sys_version=False):
for line in lines_iter:
line = line.strip()
if not line or line.startswith('#'):
continue
match = REQ_PATTERN.match(line)
if match is None:
raise AssertionError(
"Could not parse requirement: '%s'" % line)
name = match.group('name')
if filter_names is not None and name not in filter_names:
continue
if filter_sys_version and match.group('pyspec'):
pycomp, pyspec = match.group('pycomp', 'pyspec')
comp = STR_TO_CMP[pycomp]
pyver_spec = StrictVersion(pyspec)
if comp(SYS_VERSION, pyver_spec):
# pip install -r understands lines with ;python_version<'3.0',
# but pip install -e does not. Filter here, removing the
# env marker.
yield line.split(';')[0]
continue
yield line
REQ_UPPER_BOUNDS = {
'bcolz': '<1',
'pandas': '<0.18',
}
def _with_bounds(req):
try:
req, lower = req.split('==')
except ValueError:
return req
else:
with_bounds = [req, '>=', lower]
upper = REQ_UPPER_BOUNDS.get(req)
if upper:
with_bounds.extend([',', upper])
return ''.join(with_bounds)
REQ_PATTERN = re.compile("(?P<name>[^=<>]+)(?P<comp>[<=>]{1,2})(?P<spec>[^;]+)"
"(?:(;\W*python_version\W*(?P<pycomp>[<=>]{1,2})\W*"
"(?P<pyspec>[0-9\.]+)))?")
def _conda_format(req):
def _sub(m):
name = m.group('name').lower()
if name == 'numpy':
return 'numpy x.x'
formatted = '%s %s%s' % ((name,) + m.group('comp', 'spec'))
pycomp, pyspec = m.group('pycomp', 'pyspec')
if pyspec:
# Compare the two-digit string versions as ints.
selector = ' # [int(py) %s int(%s)]' % (
pycomp, ''.join(pyspec.split('.')[:2]).ljust(2, '0')
)
return formatted + selector
return formatted
return REQ_PATTERN.sub(_sub, req, 1)
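# For illustration (not part of the original file): _conda_format() rewrites a
# pip-style requirement into conda recipe syntax.  Roughly, a line such as
#   pandas==0.17.1;python_version<3.0
# becomes
#   pandas ==0.17.1 # [int(py) < int(30)]
# and any numpy pin is replaced by "numpy x.x".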
def read_requirements(path,
strict_bounds,
conda_format=False,
filter_names=None):
"""
Read a requirements.txt file, expressed as a path relative to Zipline root.
Returns requirements with the pinned versions as lower bounds
if `strict_bounds` is falsey.
"""
real_path = join(dirname(abspath(__file__)), path)
with open(real_path) as f:
reqs = _filter_requirements(f.readlines(), filter_names=filter_names,
filter_sys_version=not conda_format)
if not strict_bounds:
reqs = map(_with_bounds, reqs)
if conda_format:
reqs = map(_conda_format, reqs)
return list(reqs)
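# Sketch of typical use: with strict_bounds=False each pinned 'pkg==X.Y.Z'
# line becomes 'pkg>=X.Y.Z' (plus any upper bound from REQ_UPPER_BOUNDS),
# e.g.
#
#     reqs = read_requirements('etc/requirements.txt', strict_bounds=False)
#
# while conda_format=True additionally rewrites each line via _conda_format.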
def install_requires(strict_bounds=False, conda_format=False):
return read_requirements('etc/requirements.txt',
strict_bounds=strict_bounds,
conda_format=conda_format)
def extras_requires(conda_format=False):
extras = {
extra: read_requirements('etc/requirements_{0}.txt'.format(extra),
strict_bounds=True,
conda_format=conda_format)
for extra in ('dev', 'talib')
}
extras['all'] = [req for reqs in extras.values() for req in reqs]
return extras
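# The result is shaped roughly like
#     {'dev': [...], 'talib': [...], 'all': [...]}
# where 'all' is simply the concatenation of every other extra's requirements.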
def setup_requirements(requirements_path, module_names, strict_bounds,
conda_format=False):
module_names = set(module_names)
module_lines = read_requirements(requirements_path,
strict_bounds=strict_bounds,
conda_format=conda_format,
filter_names=module_names)
if len(set(module_lines)) != len(module_names):
raise AssertionError(
"Missing requirements. Looking for %s, but found %s."
% (module_names, module_lines)
)
return module_lines
conda_build = os.path.basename(sys.argv[0]) in ('conda-build', # unix
'conda-build-script.py') # win
setup_requires = setup_requirements(
'etc/requirements.txt',
('Cython', 'numpy'),
strict_bounds=conda_build,
conda_format=conda_build,
)
conditional_arguments = {
'setup_requires' if not conda_build else 'build_requires': setup_requires,
}
setup(
name='zipline',
url="http://zipline.io",
version=versioneer.get_version(),
cmdclass=LazyBuildExtCommandClass(versioneer.get_cmdclass()),
description='A backtester for financial algorithms.',
entry_points={
'console_scripts': [
'zipline = zipline.__main__:main',
],
},
author='Quantopian Inc.',
author_email='[email protected]',
packages=find_packages(include=['zipline', 'zipline.*']),
ext_modules=ext_modules,
include_package_data=True,
package_data={root.replace(os.sep, '.'):
['*.pyi', '*.pyx', '*.pxi', '*.pxd']
for root, dirnames, filenames in os.walk('zipline')
if '__pycache__' not in root},
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Office/Business :: Financial',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: System :: Distributed Computing',
],
install_requires=install_requires(conda_format=conda_build),
extras_require=extras_requires(conda_format=conda_build),
**conditional_arguments
)
| apache-2.0 |
FluidityProject/fluidity | tools/statplot.py | 2 | 15434 | #!/usr/bin/env python3
import gi
gi.require_version('Gdk', '3.0')
gi.require_version('Gtk', '3.0')
from gi.repository import Gdk, Gtk # noqa: E402
import argparse # noqa: E402
from lxml import etree # noqa: E402
from matplotlib import rc # noqa: E402
from matplotlib.backends.backend_gtk3agg import ( # noqa: E402
FigureCanvasGTK3Agg as FigureCanvas)
from matplotlib.backends.backend_gtk3 import ( # noqa: E402
NavigationToolbar2GTK3 as NavigationToolbar)
from matplotlib.figure import Figure # noqa: E402
import matplotlib.ticker as tck # noqa: E402
import numpy # noqa: E402
import sys # noqa: E402
import warnings # noqa: E402
rc('text', usetex=False)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''GUI for Fluidity .stat outputs
When the window is displayed, press the following keys for additional options:
- x, y -> Change the scale of the x, y axis (automatically chooses between
linear, log and symlog).
- l -> Change the representation of the data (solid line or markers).
- r -> Reload the .stat file. Only relevant for simulations that are still
running.
- a, o -> Grab focus onto the abscissa, ordinate text boxes.
- Esc -> Release focus on text boxes.
- q -> Exit the GUI.''')
parser.add_argument('statfile', nargs='+', help='Path(s) to the .stat file(s)')
args = parser.parse_args()
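# Typical invocations (file names are hypothetical):
#
#     python statplot.py fluidity_run.stat
#     python statplot.py run_part1.stat run_part2.stat
#
# When several .stat files are given they are concatenated, provided their
# header entries match (see ReadData below).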
class StatplotWindow(Gtk.Window):
def __init__(self, statfile):
Gtk.Window.__init__(self)
self.connect('key-release-event', self.KeyPressed)
self.set_border_width(8)
self.set_default_size(1600, 900)
self.set_position(Gtk.WindowPosition.CENTER)
self.set_title(statfile[-1])
self.statfile = statfile
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=8)
vbox.set_homogeneous(False)
self.add(vbox)
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=8)
hbox.set_homogeneous(True)
vbox.pack_end(hbox, False, False, 0)
self.entries, self.values = self.ReadData(self.statfile)
store = Gtk.ListStore.new([str])
for entry in self.entries:
store.append([entry])
self.xCombo = self.CreateCombo(store)
self.yCombo = self.CreateCombo(store)
self.InitCombo()
self.InitCompletion(self.xCombo)
self.InitCompletion(self.yCombo)
self.xCombo.connect('changed', self.ComboChanged)
self.xCombo.connect('key-release-event', self.ReleaseFocus)
self.yCombo.connect('changed', self.ComboChanged)
self.yCombo.connect('key-release-event', self.ReleaseFocus)
hbox.pack_start(self.xCombo, True, True, 0)
hbox.pack_start(self.yCombo, True, True, 0)
self.fig = Figure(figsize=(14, 7))
self.ax = self.fig.gca()
self.canvas = FigureCanvas(self.fig)
self.canvas.set_can_focus(True)
vbox.pack_start(self.canvas, True, True, 0)
self.PlotType = 'marker' if self.values.ndim == 1 else 'line'
self.PlotData('create', self.PlotType, 'linear', 'linear')
self.toolbar = NavigationToolbar(self.canvas, self)
vbox.pack_start(self.toolbar, False, False, 0)
def ComboChanged(self, comboBox):
activeText = comboBox.get_child().get_text()
if activeText in self.entries:
comboBox.set_active(self.entries.index(activeText))
if comboBox is self.xCombo:
self.PlotData('update', self.PlotType, 'linear',
self.ax.get_yscale())
elif comboBox is self.yCombo:
self.PlotData('update', self.PlotType, self.ax.get_xscale(),
'linear')
self.canvas.grab_focus()
@staticmethod
def CreateCombo(store):
comboBox = Gtk.ComboBox.new_with_model_and_entry(store)
comboBox.set_entry_text_column(0)
comboBox.set_margin_start(100)
comboBox.set_margin_end(100)
comboBox.set_wrap_width(3)
return comboBox
def FormatAxis(self, ax2fmt, scale):
if ax2fmt == 'x':
setScale = self.ax.set_xscale
curAxis = self.ax.xaxis
curData = self.xData
elif ax2fmt == 'y':
setScale = self.ax.set_yscale
curAxis = self.ax.yaxis
curData = self.yData
setScale(scale)
if scale == 'linear':
self.ax.ticklabel_format(style='sci', axis=ax2fmt,
scilimits=(0, 0), useMathText=True)
curAxis.set_minor_locator(tck.AutoMinorLocator())
self.ax.relim()
self.ax.autoscale(True, ax2fmt, None)
elif scale == 'log':
curAxis.set_minor_locator(
tck.LogLocator(subs=numpy.arange(2, 10)))
logFmt = tck.LogFormatterSciNotation(base=10,
labelOnlyBase=False,
minor_thresholds=(4, 1))
curAxis.set_minor_formatter(logFmt)
self.ax.relim()
self.ax.autoscale(True, ax2fmt, None)
elif scale == 'symlog':
axMin = min(abs(curData[curData != 0]))
axMax = max(abs(curData))
axRange = numpy.log10(axMax / axMin)
if ax2fmt == 'x':
setScale(
'symlog', basex=10, subsx=numpy.arange(2, 10),
linthreshx=axMin * 10 ** (axRange / 2))
elif ax2fmt == 'y':
setScale(
'symlog', basey=10, subsy=numpy.arange(2, 10),
linthreshy=axMin * 10 ** (axRange / 2))
# Thomas Duvernay, 06/01/19
# There seems to be a bug with the labelling of the 0 tick
# when a 'symlog' is used as an axis scale. It looks like
# it is considered as a minor tick.
symLogLoc = tck.SymmetricalLogLocator(
subs=numpy.arange(2, 10),
linthresh=axMin * 10 ** (axRange / 2), base=10)
curAxis.set_minor_locator(symLogLoc)
logFmt = tck.LogFormatterSciNotation(
base=10, labelOnlyBase=False, minor_thresholds=(4, 1),
linthresh=axMin * 10 ** (axRange / 2))
curAxis.set_minor_formatter(logFmt)
self.ax.set_xlabel(self.xCombo.get_child().get_text(),
fontweight='bold', fontsize=20)
self.ax.set_ylabel(self.yCombo.get_child().get_text(),
fontweight='bold', fontsize=20)
self.ax.tick_params(which='major', length=7, labelsize=16, width=2)
self.ax.tick_params(which='minor', length=4, labelsize=10, width=2,
colors='xkcd:scarlet', labelrotation=45)
self.ax.xaxis.get_offset_text().set(fontsize=13, fontweight='bold',
color='xkcd:black')
self.ax.yaxis.get_offset_text().set(fontsize=13, fontweight='bold',
color='xkcd:black')
self.fig.set_tight_layout(True)
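    # In the 'symlog' branch above, the linear threshold is placed halfway
    # (in log space) between the smallest and largest magnitudes of the data:
    # linthresh = axMin * 10 ** (axRange / 2), with axRange = log10(axMax/axMin).
    # As a rough example, data spanning 1e-6 .. 1e+2 gives axRange = 8 and
    # linthresh = 1e-6 * 10**4 = 1e-2.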
def InitCombo(self):
if 'ElapsedTime' in self.entries:
iterX = self.xCombo.get_model().get_iter(
self.entries.index('ElapsedTime'))
self.xCombo.set_active_iter(iterX)
self.yCombo.set_active(0)
else:
self.xCombo.set_active(0)
self.yCombo.set_active(1)
@staticmethod
def InitCompletion(comboBox):
completion = Gtk.EntryCompletion.new()
completion.set_text_column(0)
completion.set_inline_completion(True)
completion.set_inline_selection(False)
completion.set_model(comboBox.get_model())
comboBox.get_child().set_completion(completion)
def KeyPressed(self, widget, event):
key = event.string
if (self.xCombo.get_child().has_focus()
or self.yCombo.get_child().has_focus()):
pass
elif key == 'r':
self.entries, self.values = self.ReadData(self.statfile)
self.PlotData('update', self.PlotType, self.ax.get_xscale(),
self.ax.get_yscale())
elif key == 'q':
self.destroy()
elif key == 'x' or key == 'y':
if self.values.ndim == 1:
warnings.warn('Insufficient data available to turn on '
'logarithmic scale', stacklevel=2)
return
if key == 'x':
self.get_scale = self.ax.get_xscale
curData = self.xData
else:
self.get_scale = self.ax.get_yscale
curData = self.yData
if self.get_scale() == 'linear' and (curData == 0).all():
warnings.warn('Change to logarithmic scale denied: the '
f'selected variable for the {key} axis is null.',
stacklevel=2)
return
elif (self.get_scale() == 'linear'
and max(abs(curData)) / min(abs(curData)) < 10):
warnings.warn('Change to logarithmic scale denied: the '
f'selected variable for the {key} axis has a '
'range of variation smaller than one order of '
'magnitude.', stacklevel=2)
return
elif self.get_scale() == 'linear':
axMin = min(abs(curData[curData != 0]))
axMax = max(abs(curData))
if axMin != axMax and min(curData) < 0:
scale = 'symlog'
elif axMin != axMax:
scale = 'log'
else:
warnings.warn('Change to logarithmic scale denied: the '
f'selected variable for the {key} axis is a '
'constant.', stacklevel=2)
return
elif self.get_scale() in ['log', 'symlog']:
scale = 'linear'
self.FormatAxis(key, scale)
self.fig.canvas.draw_idle()
self.fig.canvas.draw()
self.fig.canvas.flush_events()
elif key == 'l':
if self.values.ndim == 1:
warnings.warn('Insufficient data available to turn on line '
+ 'display', stacklevel=2)
return
self.PlotType = 'marker' if self.PlotType == 'line' else 'line'
self.PlotData('update', self.PlotType, self.ax.get_xscale(),
self.ax.get_yscale())
elif key == 'a':
self.xCombo.grab_focus()
elif key == 'o':
self.yCombo.grab_focus()
def PlotData(self, action, type, xscale, yscale):
self.xData = self.values[..., self.xCombo.get_active()]
self.yData = self.values[..., self.yCombo.get_active()]
if action == 'create':
self.statplot, = self.ax.plot(self.xData, self.yData)
elif action == 'update':
self.statplot.set_xdata(self.xData)
self.statplot.set_ydata(self.yData)
self.toolbar.update()
if type == 'line':
self.statplot.set_color('xkcd:light purple')
self.statplot.set_linestyle('solid')
self.statplot.set_linewidth(2)
self.statplot.set_marker('None')
elif type == 'marker':
self.statplot.set_linestyle('None')
self.statplot.set_marker('d')
self.statplot.set_markeredgecolor('xkcd:black')
self.statplot.set_markerfacecolor('xkcd:light purple')
self.statplot.set_markersize(7)
self.statplot.set_markeredgewidth(0.3)
self.FormatAxis('x', xscale)
self.FormatAxis('y', yscale)
if action == 'update':
self.fig.canvas.draw_idle()
self.fig.canvas.draw()
self.fig.canvas.flush_events()
@staticmethod
def ReadData(statfile):
def GatherEntries(stat2read):
print('Reading ' + stat2read)
entries = []
with open(stat2read, 'r') as fid:
for num, line in enumerate(fid):
if line.startswith('<field'):
info = etree.fromstring(line)
if info.attrib['statistic'] == 'value':
entries.append(info.attrib['name'])
elif all([x in info.attrib for x in
['components', 'material_phase']]):
for i in range(int(info.attrib['components'])):
entries.append('{e[material_phase]}%{e[name]}'
'%{e[statistic]}%{i}'
.format(e=info.attrib, i=i))
elif 'components' in info.attrib:
for i in range(int(info.get('components'))):
entries.append('{e[name]}%{e[statistic]}%{i}'
.format(e=info.attrib, i=i))
elif 'material_phase' in info.attrib:
entries.append('{e[material_phase]}%{e[name]}'
'%{e[statistic]}'
.format(e=info.attrib))
else:
entries.append('{e[name]}%{e[statistic]}'
.format(e=info.attrib))
elif line.startswith('</header>'):
break
return numpy.asarray(entries), num
if statfile[0].endswith('stat'):
entries, num = GatherEntries(statfile[0])
values = numpy.genfromtxt(statfile[0], skip_header=num + 1)
values = values[..., numpy.argsort(entries)]
entries = entries[numpy.argsort(entries)]
if len(statfile) > 1:
for item in statfile[1:]:
entriesTemp, num = GatherEntries(item)
if numpy.array_equal(
entries, entriesTemp[numpy.argsort(entriesTemp)]):
valuesTemp = numpy.genfromtxt(item,
skip_header=num + 1)
valuesTemp = valuesTemp.T[numpy.argsort(entriesTemp)].T
values = numpy.vstack((values, valuesTemp))
else:
sys.exit('Statfiles entries do not match')
elif statfile[0].endswith('detectors'):
entries = GatherEntries(statfile[0])[0]
values = numpy.fromfile(statfile[0] + '.dat',
dtype=numpy.float64)
ncols = entries.size
nrows = values.size // ncols
values = values[:nrows * ncols].reshape(nrows, ncols)
return list(entries), values
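    # GatherEntries above turns header lines such as (an illustrative sketch,
    # not a verbatim Fluidity header)
    #     <field name="Velocity" statistic="max" components="3"
    #            material_phase="Fluid"/>
    # into column names like 'Fluid%Velocity%max%0', 'Fluid%Velocity%max%1',
    # 'Fluid%Velocity%max%2', which are then sorted alongside the data columns.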
def ReleaseFocus(self, widget, event):
if event.keyval == Gdk.KEY_Escape:
self.canvas.grab_focus()
window = StatplotWindow(args.statfile)
window.connect('delete-event', Gtk.main_quit)
window.connect('destroy', Gtk.main_quit)
window.show_all()
Gtk.main()
| lgpl-2.1 |
tawsifkhan/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/plot_train_error_vs_test_error.py | 8 | 2548 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases, while the
performance on the test set is optimal within a range of values of the
regularization parameter. The example uses an Elastic-Net regression model,
and the performance is measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the top 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import pylab as pl
pl.subplot(2, 1, 1)
pl.semilogx(alphas, train_errors, label='Train')
pl.semilogx(alphas, test_errors, label='Test')
pl.vlines(alpha_optim, pl.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
pl.legend(loc='lower left')
pl.ylim([0, 1.2])
pl.xlabel('Regularization parameter')
pl.ylabel('Performance')
# Show estimated coef_ vs true coef
pl.subplot(2, 1, 2)
pl.plot(coef, label='True coef')
pl.plot(coef_, label='Estimated coef')
pl.legend()
pl.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
pl.show()
| bsd-3-clause |
zuku1985/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 63 | 2945 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positive elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
colors = cycle(['b', 'r', 'g', 'c', 'k'])
neg_log_alphas_lasso = -np.log10(alphas_lasso)
neg_log_alphas_enet = -np.log10(alphas_enet)
for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso)
for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_positive_lasso, coef_pl, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet)
for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors):
l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c)
l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
bnaul/scikit-learn | sklearn/linear_model/_glm/tests/test_glm.py | 5 | 15605 | # Authors: Christian Lorentzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from numpy.testing import assert_allclose
import pytest
import warnings
from sklearn.datasets import make_regression
from sklearn.linear_model._glm import GeneralizedLinearRegressor
from sklearn.linear_model import (
TweedieRegressor,
PoissonRegressor,
GammaRegressor
)
from sklearn.linear_model._glm.link import (
IdentityLink,
LogLink,
)
from sklearn._loss.glm_distribution import (
TweedieDistribution,
NormalDistribution, PoissonDistribution,
GammaDistribution, InverseGaussianDistribution,
)
from sklearn.linear_model import Ridge
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import train_test_split
@pytest.fixture(scope="module")
def regression_data():
X, y = make_regression(n_samples=107,
n_features=10,
n_informative=80, noise=0.5,
random_state=2)
return X, y
def test_sample_weights_validation():
"""Test the raised errors in the validation of sample_weight."""
# scalar value but not positive
X = [[1]]
y = [1]
weights = 0
glm = GeneralizedLinearRegressor()
# Positive weights are accepted
glm.fit(X, y, sample_weight=1)
# 2d array
weights = [[0]]
with pytest.raises(ValueError, match="must be 1D array or scalar"):
glm.fit(X, y, weights)
# 1d but wrong length
weights = [1, 0]
msg = r"sample_weight.shape == \(2,\), expected \(1,\)!"
with pytest.raises(ValueError, match=msg):
glm.fit(X, y, weights)
@pytest.mark.parametrize('name, instance',
[('normal', NormalDistribution()),
('poisson', PoissonDistribution()),
('gamma', GammaDistribution()),
('inverse-gaussian', InverseGaussianDistribution())])
def test_glm_family_argument(name, instance):
"""Test GLM family argument set as string."""
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(family=name, alpha=0).fit(X, y)
assert isinstance(glm._family_instance, instance.__class__)
glm = GeneralizedLinearRegressor(family='not a family')
with pytest.raises(ValueError, match="family must be"):
glm.fit(X, y)
@pytest.mark.parametrize('name, instance',
[('identity', IdentityLink()),
('log', LogLink())])
def test_glm_link_argument(name, instance):
"""Test GLM link argument set as string."""
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(family='normal', link=name).fit(X, y)
assert isinstance(glm._link_instance, instance.__class__)
glm = GeneralizedLinearRegressor(family='normal', link='not a link')
with pytest.raises(ValueError, match="link must be"):
glm.fit(X, y)
@pytest.mark.parametrize('family, expected_link_class', [
('normal', IdentityLink),
('poisson', LogLink),
('gamma', LogLink),
('inverse-gaussian', LogLink),
])
def test_glm_link_auto(family, expected_link_class):
# Make sure link='auto' delivers the expected link function
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(family=family, link='auto').fit(X, y)
assert isinstance(glm._link_instance, expected_link_class)
@pytest.mark.parametrize('alpha', ['not a number', -4.2])
def test_glm_alpha_argument(alpha):
"""Test GLM for invalid alpha argument."""
y = np.array([1, 2])
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(family='normal', alpha=alpha)
with pytest.raises(ValueError,
match="Penalty term must be a non-negative"):
glm.fit(X, y)
@pytest.mark.parametrize('fit_intercept', ['not bool', 1, 0, [True]])
def test_glm_fit_intercept_argument(fit_intercept):
"""Test GLM for invalid fit_intercept argument."""
y = np.array([1, 2])
X = np.array([[1], [1]])
glm = GeneralizedLinearRegressor(fit_intercept=fit_intercept)
with pytest.raises(ValueError, match="fit_intercept must be bool"):
glm.fit(X, y)
@pytest.mark.parametrize('solver',
['not a solver', 1, [1]])
def test_glm_solver_argument(solver):
"""Test GLM for invalid solver argument."""
y = np.array([1, 2])
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(solver=solver)
with pytest.raises(ValueError):
glm.fit(X, y)
@pytest.mark.parametrize('max_iter', ['not a number', 0, -1, 5.5, [1]])
def test_glm_max_iter_argument(max_iter):
"""Test GLM for invalid max_iter argument."""
y = np.array([1, 2])
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(max_iter=max_iter)
with pytest.raises(ValueError, match="must be a positive integer"):
glm.fit(X, y)
@pytest.mark.parametrize('tol', ['not a number', 0, -1.0, [1e-3]])
def test_glm_tol_argument(tol):
"""Test GLM for invalid tol argument."""
y = np.array([1, 2])
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(tol=tol)
with pytest.raises(ValueError, match="stopping criteria must be positive"):
glm.fit(X, y)
@pytest.mark.parametrize('warm_start', ['not bool', 1, 0, [True]])
def test_glm_warm_start_argument(warm_start):
"""Test GLM for invalid warm_start argument."""
y = np.array([1, 2])
X = np.array([[1], [1]])
glm = GeneralizedLinearRegressor(warm_start=warm_start)
with pytest.raises(ValueError, match="warm_start must be bool"):
glm.fit(X, y)
@pytest.mark.parametrize('fit_intercept', [False, True])
def test_glm_identity_regression(fit_intercept):
"""Test GLM regression with identity link on a simple dataset."""
coef = [1., 2.]
X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
y = np.dot(X, coef)
glm = GeneralizedLinearRegressor(alpha=0, family='normal', link='identity',
fit_intercept=fit_intercept, tol=1e-12)
if fit_intercept:
glm.fit(X[:, 1:], y)
assert_allclose(glm.coef_, coef[1:], rtol=1e-10)
assert_allclose(glm.intercept_, coef[0], rtol=1e-10)
else:
glm.fit(X, y)
assert_allclose(glm.coef_, coef, rtol=1e-12)
@pytest.mark.parametrize('fit_intercept', [False, True])
@pytest.mark.parametrize('alpha', [0.0, 1.0])
@pytest.mark.parametrize('family', ['normal', 'poisson', 'gamma'])
def test_glm_sample_weight_consistency(fit_intercept, alpha, family):
"""Test that the impact of sample_weight is consistent"""
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
glm_params = dict(alpha=alpha, family=family, link='auto',
fit_intercept=fit_intercept)
glm = GeneralizedLinearRegressor(**glm_params).fit(X, y)
coef = glm.coef_.copy()
# sample_weight=np.ones(..) should be equivalent to sample_weight=None
sample_weight = np.ones(y.shape)
glm.fit(X, y, sample_weight=sample_weight)
assert_allclose(glm.coef_, coef, rtol=1e-12)
# sample_weight are normalized to 1 so, scaling them has no effect
sample_weight = 2*np.ones(y.shape)
glm.fit(X, y, sample_weight=sample_weight)
assert_allclose(glm.coef_, coef, rtol=1e-12)
# setting one element of sample_weight to 0 is equivalent to removing
    # the corresponding sample
sample_weight = np.ones(y.shape)
sample_weight[-1] = 0
glm.fit(X, y, sample_weight=sample_weight)
coef1 = glm.coef_.copy()
glm.fit(X[:-1], y[:-1])
assert_allclose(glm.coef_, coef1, rtol=1e-12)
# check that multiplying sample_weight by 2 is equivalent
    # to repeating the corresponding samples twice
X2 = np.concatenate([X, X[:n_samples//2]], axis=0)
y2 = np.concatenate([y, y[:n_samples//2]])
sample_weight_1 = np.ones(len(y))
sample_weight_1[:n_samples//2] = 2
glm1 = GeneralizedLinearRegressor(**glm_params).fit(
X, y, sample_weight=sample_weight_1
)
glm2 = GeneralizedLinearRegressor(**glm_params).fit(
X2, y2, sample_weight=None
)
assert_allclose(glm1.coef_, glm2.coef_)
@pytest.mark.parametrize('fit_intercept', [True, False])
@pytest.mark.parametrize(
'family',
[NormalDistribution(), PoissonDistribution(),
GammaDistribution(), InverseGaussianDistribution(),
TweedieDistribution(power=1.5), TweedieDistribution(power=4.5)])
def test_glm_log_regression(fit_intercept, family):
"""Test GLM regression with log link on a simple dataset."""
coef = [0.2, -0.1]
X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
y = np.exp(np.dot(X, coef))
glm = GeneralizedLinearRegressor(
alpha=0, family=family, link='log',
fit_intercept=fit_intercept, tol=1e-7)
if fit_intercept:
res = glm.fit(X[:, 1:], y)
assert_allclose(res.coef_, coef[1:], rtol=1e-6)
assert_allclose(res.intercept_, coef[0], rtol=1e-6)
else:
res = glm.fit(X, y)
assert_allclose(res.coef_, coef, rtol=2e-6)
@pytest.mark.parametrize('fit_intercept', [True, False])
def test_warm_start(fit_intercept):
n_samples, n_features = 110, 10
X, y = make_regression(n_samples=n_samples, n_features=n_features,
n_informative=n_features-2, noise=0.5,
random_state=42)
glm1 = GeneralizedLinearRegressor(
warm_start=False,
fit_intercept=fit_intercept,
max_iter=1000
)
glm1.fit(X, y)
glm2 = GeneralizedLinearRegressor(
warm_start=True,
fit_intercept=fit_intercept,
max_iter=1
)
# As we intentionally set max_iter=1, L-BFGS-B will issue a
# ConvergenceWarning which we here simply ignore.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=ConvergenceWarning)
glm2.fit(X, y)
assert glm1.score(X, y) > glm2.score(X, y)
glm2.set_params(max_iter=1000)
glm2.fit(X, y)
# The two model are not exactly identical since the lbfgs solver
# computes the approximate hessian from previous iterations, which
# will not be strictly identical in the case of a warm start.
assert_allclose(glm1.coef_, glm2.coef_, rtol=1e-5)
assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-4)
@pytest.mark.parametrize('n_samples, n_features', [(100, 10), (10, 100)])
@pytest.mark.parametrize('fit_intercept', [True, False])
@pytest.mark.parametrize('sample_weight', [None, True])
def test_normal_ridge_comparison(n_samples, n_features, fit_intercept,
sample_weight, request):
"""Compare with Ridge regression for Normal distributions."""
test_size = 10
X, y = make_regression(n_samples=n_samples + test_size,
n_features=n_features,
n_informative=n_features-2, noise=0.5,
random_state=42)
if n_samples > n_features:
ridge_params = {"solver": "svd"}
else:
ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7}
X_train, X_test, y_train, y_test, = train_test_split(
X, y, test_size=test_size, random_state=0
)
alpha = 1.0
if sample_weight is None:
sw_train = None
alpha_ridge = alpha * n_samples
else:
sw_train = np.random.RandomState(0).rand(len(y_train))
alpha_ridge = alpha * sw_train.sum()
# GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2
ridge = Ridge(alpha=alpha_ridge, normalize=False,
random_state=42, fit_intercept=fit_intercept,
**ridge_params)
ridge.fit(X_train, y_train, sample_weight=sw_train)
glm = GeneralizedLinearRegressor(alpha=alpha, family='normal',
link='identity',
fit_intercept=fit_intercept,
max_iter=300,
tol=1e-5)
glm.fit(X_train, y_train, sample_weight=sw_train)
assert glm.coef_.shape == (X.shape[1], )
assert_allclose(glm.coef_, ridge.coef_, atol=5e-5)
assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5)
assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4)
assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4)
def test_poisson_glmnet():
"""Compare Poisson regression with L2 regularization and LogLink to glmnet
"""
# library("glmnet")
# options(digits=10)
# df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2))
# x <- data.matrix(df[,c("a", "b")])
# y <- df$y
# fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson",
# standardize=F, thresh=1e-10, nlambda=10000)
# coef(fit, s=1)
# (Intercept) -0.12889386979
# a 0.29019207995
# b 0.03741173122
X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T
y = np.array([0, 1, 1, 2])
glm = GeneralizedLinearRegressor(alpha=1,
fit_intercept=True, family='poisson',
link='log', tol=1e-7,
max_iter=300)
glm.fit(X, y)
assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5)
assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5)
def test_convergence_warning(regression_data):
X, y = regression_data
est = GeneralizedLinearRegressor(max_iter=1, tol=1e-20)
with pytest.warns(ConvergenceWarning):
est.fit(X, y)
def test_poisson_regression_family(regression_data):
# Make sure the family attribute is read-only to prevent searching over it
# e.g. in a grid search
est = PoissonRegressor()
    assert est.family == "poisson"
msg = "PoissonRegressor.family must be 'poisson'!"
with pytest.raises(ValueError, match=msg):
est.family = 0
def test_gamma_regression_family(regression_data):
# Make sure the family attribute is read-only to prevent searching over it
# e.g. in a grid search
est = GammaRegressor()
    assert est.family == "gamma"
msg = "GammaRegressor.family must be 'gamma'!"
with pytest.raises(ValueError, match=msg):
est.family = 0
def test_tweedie_regression_family(regression_data):
# Make sure the family attribute is always a TweedieDistribution and that
# the power attribute is properly updated
power = 2.0
est = TweedieRegressor(power=power)
assert isinstance(est.family, TweedieDistribution)
assert est.family.power == power
assert est.power == power
new_power = 0
new_family = TweedieDistribution(power=new_power)
est.family = new_family
assert isinstance(est.family, TweedieDistribution)
assert est.family.power == new_power
assert est.power == new_power
msg = "TweedieRegressor.family must be of type TweedieDistribution!"
with pytest.raises(TypeError, match=msg):
est.family = None
@pytest.mark.parametrize(
'estimator, value',
[
(PoissonRegressor(), True),
(GammaRegressor(), True),
(TweedieRegressor(power=1.5), True),
(TweedieRegressor(power=0), False)
],
)
def test_tags(estimator, value):
assert estimator._get_tags()['requires_positive_y'] is value
| bsd-3-clause |
kdebrab/pandas | asv_bench/benchmarks/timedelta.py | 4 | 2981 | import datetime
import numpy as np
from pandas import Series, timedelta_range, to_timedelta, Timestamp, Timedelta
class TimedeltaConstructor(object):
goal_time = 0.2
def time_from_int(self):
Timedelta(123456789)
def time_from_unit(self):
Timedelta(1, unit='d')
def time_from_components(self):
Timedelta(days=1, hours=2, minutes=3, seconds=4, milliseconds=5,
microseconds=6, nanoseconds=7)
def time_from_datetime_timedelta(self):
Timedelta(datetime.timedelta(days=1, seconds=1))
def time_from_np_timedelta(self):
Timedelta(np.timedelta64(1, 'ms'))
def time_from_string(self):
Timedelta('1 days')
def time_from_iso_format(self):
Timedelta('P4DT12H30M5S')
def time_from_missing(self):
Timedelta('nat')
class ToTimedelta(object):
goal_time = 0.2
def setup(self):
self.ints = np.random.randint(0, 60, size=10000)
self.str_days = []
self.str_seconds = []
for i in self.ints:
self.str_days.append('{0} days'.format(i))
self.str_seconds.append('00:00:{0:02d}'.format(i))
def time_convert_int(self):
to_timedelta(self.ints, unit='s')
def time_convert_string_days(self):
to_timedelta(self.str_days)
def time_convert_string_seconds(self):
to_timedelta(self.str_seconds)
class ToTimedeltaErrors(object):
goal_time = 0.2
params = ['coerce', 'ignore']
param_names = ['errors']
def setup(self, errors):
ints = np.random.randint(0, 60, size=10000)
self.arr = ['{0} days'.format(i) for i in ints]
self.arr[-1] = 'apple'
def time_convert(self, errors):
to_timedelta(self.arr, errors=errors)
class TimedeltaOps(object):
goal_time = 0.2
def setup(self):
self.td = to_timedelta(np.arange(1000000))
self.ts = Timestamp('2000')
def time_add_td_ts(self):
self.td + self.ts
class TimedeltaProperties(object):
goal_time = 0.2
def setup_cache(self):
td = Timedelta(days=365, minutes=35, seconds=25, milliseconds=35)
return td
def time_timedelta_days(self, td):
td.days
def time_timedelta_seconds(self, td):
td.seconds
def time_timedelta_microseconds(self, td):
td.microseconds
def time_timedelta_nanoseconds(self, td):
td.nanoseconds
class DatetimeAccessor(object):
goal_time = 0.2
def setup_cache(self):
N = 100000
series = Series(timedelta_range('1 days', periods=N, freq='h'))
return series
def time_dt_accessor(self, series):
series.dt
def time_timedelta_days(self, series):
series.dt.days
def time_timedelta_seconds(self, series):
series.dt.seconds
def time_timedelta_microseconds(self, series):
series.dt.microseconds
def time_timedelta_nanoseconds(self, series):
series.dt.nanoseconds
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
PlayUAV/MissionPlanner | Lib/site-packages/numpy/core/function_base.py | 82 | 5474 | __all__ = ['logspace', 'linspace']
import numeric as _nx
from numeric import array
def linspace(start, stop, num=50, endpoint=True, retstep=False):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop` ].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float (only if `retstep` is True)
Size of spacing between samples.
See Also
--------
    arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num <= 0:
return array([], float)
if endpoint:
if num == 1:
return array([float(start)])
step = (stop-start)/float((num-1))
y = _nx.arange(0, num) * step + start
y[-1] = stop
else:
step = (stop-start)/float(num)
y = _nx.arange(0, num) * step + start
if retstep:
return y, step
else:
return y
def logspace(start, stop, num=50, endpoint=True, base=10.0):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
    arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start,stop,num=num,endpoint=endpoint)
return _nx.power(base,y)
| gpl-3.0 |
RobertABT/heightmap | build/matplotlib/examples/pylab_examples/psd_demo2.py | 9 | 1445 | #This example shows the effects of some of the different PSD parameters
import numpy as np
import matplotlib.pyplot as plt
dt = np.pi / 100.
fs = 1. / dt
t = np.arange(0, 8, dt)
y = 10. * np.sin(2 * np.pi * 4 * t) + 5. * np.sin(2 * np.pi * 4.25 * t)
y = y + np.random.randn(*t.shape)
#Plot the raw time series
fig = plt.figure()
fig.subplots_adjust(hspace=0.45, wspace=0.3)
ax = fig.add_subplot(2, 1, 1)
ax.plot(t, y)
#Plot the PSD with different amounts of zero padding. This uses the entire
#time series at once
ax2 = fig.add_subplot(2, 3, 4)
ax2.psd(y, NFFT=len(t), pad_to=len(t), Fs=fs)
ax2.psd(y, NFFT=len(t), pad_to=len(t)*2, Fs=fs)
ax2.psd(y, NFFT=len(t), pad_to=len(t)*4, Fs=fs)
plt.title('zero padding')
#Plot the PSD with different block sizes, Zero pad to the length of the original
#data sequence.
ax3 = fig.add_subplot(2, 3, 5, sharex=ax2, sharey=ax2)
ax3.psd(y, NFFT=len(t), pad_to=len(t), Fs=fs)
ax3.psd(y, NFFT=len(t)//2, pad_to=len(t), Fs=fs)
ax3.psd(y, NFFT=len(t)//4, pad_to=len(t), Fs=fs)
ax3.set_ylabel('')
plt.title('block size')
#Plot the PSD with different amounts of overlap between blocks
ax4 = fig.add_subplot(2, 3, 6, sharex=ax2, sharey=ax2)
ax4.psd(y, NFFT=len(t)//2, pad_to=len(t), noverlap=0, Fs=fs)
ax4.psd(y, NFFT=len(t)//2, pad_to=len(t), noverlap=int(0.05*len(t)/2.), Fs=fs)
ax4.psd(y, NFFT=len(t)//2, pad_to=len(t), noverlap=int(0.2*len(t)/2.), Fs=fs)
ax4.set_ylabel('')
plt.title('overlap')
plt.show()
| mit |
botlabio/autonomio | autonomio/plots/quadparam.py | 1 | 1784 | import numpy as np
import matplotlib
matplotlib.use('Agg')  # the backend must be set here, before pyplot is imported
import matplotlib.pyplot as plt
plt.style.use('bmh')
def quadparam(data, x, y, size, color, title='auto'):
'''
USE: quadparam(df,'layers','batch_size','test_acc','test_loss')
Used for taking in 4 different parameters, where 'x'
and 'y' are for the axis, and then 'size' and 'color'
are for adding another two dimesions.
data = pandas dataframe coming from hyperscan()
x = should be numeric (one of the columns in data)
y = should be numeric
size = should be numeric
color = should be numeric
title = a string to be used as the title of the plot.
The default is 'auto' that generates a title
based on the inputs.
'''
    if title == 'auto':
title = x + ' & ' + y + ' correlation'
fig = plt.figure(num=None, figsize=(8, 8), dpi=80, facecolor='w',
edgecolor='k')
s_label = size
c_label = color
temp = np.array(data[size].astype(float))
temp *= (500 / temp.max())
s = temp
color = data[color]**data[color]
plt.scatter(data[x], data[y], edgecolors='black', cmap='coolwarm', s=s,
c=color)
plt.tick_params(axis='both', which='major', pad=15)
plt.title(title, fontsize=23, y=1.09, color="gray")
plt.suptitle("size = " + s_label + " || " "hue = " + c_label,
verticalalignment='top',
fontsize=16,
y=.93,
x=0.52,
color="gray")
plt.xlabel(x, fontsize=18, labelpad=15, color="gray")
plt.ylabel(y, fontsize=18, labelpad=15, color="gray")
plt.tick_params(axis='both', which='major', pad=25)
plt.grid(b=False)
plt.show()
| mit |
gfyoung/pandas | pandas/tests/series/methods/test_clip.py | 2 | 3378 | import numpy as np
import pytest
import pandas as pd
from pandas import Series, Timestamp, isna, notna
import pandas._testing as tm
class TestSeriesClip:
def test_clip(self, datetime_series):
val = datetime_series.median()
assert datetime_series.clip(lower=val).min() == val
assert datetime_series.clip(upper=val).max() == val
result = datetime_series.clip(-0.5, 0.5)
expected = np.clip(datetime_series, -0.5, 0.5)
tm.assert_series_equal(result, expected)
assert isinstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [
Series([np.nan, 1.0, 2.0, 3.0]),
Series([None, "a", "b", "c"]),
Series(pd.to_datetime([np.nan, 1, 2, 3], unit="D")),
]
for s in sers:
thresh = s[2]
lower = s.clip(lower=thresh)
upper = s.clip(upper=thresh)
assert lower[notna(lower)].min() == thresh
assert upper[notna(upper)].max() == thresh
assert list(isna(s)) == list(isna(lower))
assert list(isna(s)) == list(isna(upper))
def test_clip_with_na_args(self):
"""Should process np.nan argument as None """
# GH#17276
s = Series([1, 2, 3])
tm.assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
tm.assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3]))
# GH#19992
tm.assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, np.nan]))
tm.assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, np.nan, 1]))
def test_clip_against_series(self):
# GH#6966
s = Series([1.0, 1.0, 4.0])
lower = Series([1.0, 2.0, 3.0])
upper = Series([1.5, 2.5, 3.5])
tm.assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
tm.assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
def test_clip_against_list_like(self, inplace, upper):
# GH#15390
original = Series([5, 6, 7])
result = original.clip(upper=upper, inplace=inplace)
expected = Series([1, 2, 3])
if inplace:
result = original
tm.assert_series_equal(result, expected, check_exact=True)
def test_clip_with_datetimes(self):
# GH#11838
# naive and tz-aware datetimes
t = Timestamp("2015-12-01 09:30:30")
s = Series([Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:31:00")])
result = s.clip(upper=t)
expected = Series(
[Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:30:30")]
)
tm.assert_series_equal(result, expected)
t = Timestamp("2015-12-01 09:30:30", tz="US/Eastern")
s = Series(
[
Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
Timestamp("2015-12-01 09:31:00", tz="US/Eastern"),
]
)
result = s.clip(upper=t)
expected = Series(
[
Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
Timestamp("2015-12-01 09:30:30", tz="US/Eastern"),
]
)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
xavierwu/scikit-learn | sklearn/linear_model/sag.py | 64 | 9815 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..utils import ConvergenceWarning
from ..utils import check_array
from .base import make_dataset
from .sgd_fast import Log, SquaredLoss
from .sag_fast import sag, get_max_squared_sum
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
    The step size is set to 1 / (L + fit_intercept + alpha_scaled) for the
    squared loss and to 4 / (L + fit_intercept + 4 * alpha_scaled) for the log
    loss, where L is the maximum squared sum of X over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
"""
if loss == 'log':
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=dict()):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared'
Loss function that will be optimized.
'log' is used for classification, like in LogisticRegression.
'squared' is used for regression, like in Ridge.
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter: int, optional
The max number of passes over the training data if the stopping
        criterion is not reached. Defaults to 1000.
tol: double, optional
        The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose: integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem: dict, optional
The initialization parameters used for warm starting. It is currently
not used in Ridge.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and eventually the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
coef_init = np.zeros(n_features, dtype=np.float64, order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.size == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1]
coef_init = coef_init[:-1]
else:
intercept_init = 0.0
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient_init = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient_init = 0.0
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros(n_samples, dtype=np.float64,
order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros(n_features, dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
if loss == 'log':
class_loss = Log()
elif loss == 'squared':
class_loss = SquaredLoss()
else:
raise ValueError("Invalid loss parameter: got %r instead of "
"one of ('log', 'squared')" % loss)
intercept_, num_seen, n_iter_, intercept_sum_gradient = \
sag(dataset, coef_init.ravel(),
intercept_init, n_samples,
n_features, tol,
max_iter,
class_loss,
step_size, alpha_scaled,
sum_gradient_init.ravel(),
gradient_memory_init.ravel(),
seen_init.ravel(),
num_seen_init,
fit_intercept,
intercept_sum_gradient_init,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
coef_ = coef_init
if fit_intercept:
coef_ = np.append(coef_, intercept_)
warm_start_mem = {'coef': coef_, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
return coef_, n_iter_, warm_start_mem
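# Warm-start sketch (illustrative; the variable names below are hypothetical):
#   coef, n_iter, mem = sag_solver(X, y, loss='squared', alpha=1.)
#   coef2, n_iter2, mem2 = sag_solver(X, y, loss='squared', alpha=1.,
#                                     warm_start_mem=mem)  # resumes from the stored state
# The returned dict carries the 'coef', 'sum_gradient', 'intercept_sum_gradient',
# 'gradient_memory', 'seen' and 'num_seen' keys that are read back at the top of
# this function.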
| bsd-3-clause |
tbtraltaa/medianshape | medianshape/experiment/msfn/msfndemo3d.py | 1 | 1682 | # encoding: utf-8
'''
++++++++++++
MSFN demo 3D
++++++++++++
'''
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from medianshape.simplicial.meshgen import distmesh3d, scipy_mesh3d
from medianshape.simplicial.mesh import Mesh3D
from medianshape import utils
from medianshape.simplicial import pointgen3d, currentgen
from medianshape.viz import plot3d
from medianshape.core.msfn import msfn
import distmesh as dm
def msfndemo3d():
'''
MSFN demo 3D
'''
fig = plt.figure(figsize=(19,8))
mesh = Mesh3D()
    # l - initial length of triangle sides. Change it to vary triangle size
mesh.bbox = (0,0,0, 1, 1, 1)
mesh.set_boundary_points()
mesh.set_diagonal()
mesh.set_boundary_values()
mesh.points, mesh.simplices = scipy_mesh3d(mesh.bbox, mesh.fixed_points, l=0.2)
mesh.triangles = utils.get_simplices(mesh.simplices)
mesh.edges = utils.get_simplices(mesh.triangles)
plot3d.plotmesh3d(mesh)
print mesh.get_info()
curves = [pointgen3d.curve2(mesh.bbox)]
points = np.array(curves)
vertices, paths, input_currents = currentgen.push_curves_on_mesh(mesh, points)
title = mesh.get_info()
plot3d.plot_curves_approx(mesh, points, vertices, paths, title)
plt.show()
lambdas = [0.0001]
for input_current in input_currents:
for l in lambdas:
title = "lambda=%.04f"%l
x, s, norm = msfn(mesh.points, mesh.triangles, mesh.edges, input_current, l)
plot3d.plot_decomposition(mesh, input_currents, x.T, None, s, title)
plt.show()
if __name__ == "__main__":
msfndemo3d()
| gpl-3.0 |
PatrickOReilly/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
acbecker/pyhm | pyhm/walkers.py | 1 | 2993 | import matplotlib.pyplot as plt
import numpy as np
def runningMoments(data):
x = np.cumsum(data)
x2 = np.cumsum(data**2)
n = np.arange(1, len(data)+1)
mean = x / n
std = np.sqrt((x2*n - x*x) / (n * (n-1)))
return mean, std
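# Note on runningMoments(): the running standard deviation uses the
# cumulative-sum identity var_n = (n * sum(x_i**2) - (sum(x_i))**2) / (n * (n - 1)),
# i.e. the unbiased sample variance, so its first entry is NaN because the n - 1
# denominator vanishes for a single sample (hence the "[1:]" slice in walkers()).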
def walkers(data, labels, npts=250, fontmin=11):
# Modified from: http://conference.scipy.org/jhepc2013/2013/entry5/index.html
nwalkers, nstep, ndim = data.shape
subsample = nstep / npts
red = "#cf6775"
blue = "#67a9cf"
fontcolor = "#dddddd"
fig = plt.figure(facecolor='#222222', figsize=(8.5, 11))
fig.subplots_adjust(hspace=0.01, wspace=0.01)
for i in range(ndim):
spA = plt.subplot2grid((ndim,3), (i,0), colspan=2)
spB = plt.subplot2grid((ndim,3), (i,2))
spA.set_axis_bgcolor("#333333")
spB.set_axis_bgcolor("#333333")
spA.tick_params(direction="out", colors="w")
spB.tick_params(axis="x", bottom="off", top="off")
spB.tick_params(axis="y", left="off", right="off")
spA.tick_params(axis="y", right="off")
if i != ndim-1: spA.tick_params(axis="x", bottom="off", top="off")
else: spA.tick_params(axis="x", top="off")
cmap = np.log(np.std(data[:,:,i], axis=1))
cmap -= np.min(cmap)
cmap /= np.max(cmap)
for j in range(nwalkers):
wdata = data[j,:,i]
rmean, rstd = runningMoments(wdata)
wdata = wdata[::subsample][1:] # [1:] since std[0] = nan
rmean = rmean[::subsample][1:]
rstd = rstd[::subsample][1:]
nsub = np.arange(nstep)[::subsample][1:]
cmap = np.abs(wdata-rmean)/rstd
#spA.plot(nsub, wdata, drawstyle="steps", color="w", alpha=0.15)
spA.plot(nsub, wdata, drawstyle="steps", color=plt.cm.bone_r(cmap[i]), alpha=0.15)
spA.plot(nsub, rmean, color=red, linestyle="-")
spA.fill_between(nsub, rmean-rstd, rmean+rstd, facecolor=blue, alpha=0.15)
spB.hist(np.ravel(data[:,:,i]), orientation='horizontal', facecolor=red, bins=50, edgecolor="none")
spB.set_ylabel(labels[i], rotation='horizontal', fontsize=fontmin+3, labelpad=15, weight="bold", color=fontcolor)
spB.set_ylim(spA.get_ylim())
spB.xaxis.set_visible(False)
spB.yaxis.tick_right()
spB.yaxis.set_label_position("right")
plt.setp(spB.get_yticklabels(), visible=False)
spA.locator_params(nbins=7, axis="y")
spA.set_yticks(spA.get_yticks()[1:-1])
spA.set_xlim(0, nstep)
if i != ndim-1:
plt.setp(spA.get_xticklabels(), visible=False)
else:
spA.set_xlabel("Step", fontsize=fontmin+3, labelpad=8, weight="bold", color=fontcolor)
plt.setp(spA.get_xticklabels(), fontsize=fontmin, weight="bold", color=fontcolor)
plt.setp(spA.get_yticklabels(), fontsize=fontmin, weight="bold", color=fontcolor)
| mit |
sphinx-gallery/sphinx-gallery | examples/plot_3_capture_repr.py | 1 | 4917 | # -*- coding: utf-8 -*-
"""
.. _capture_repr_examples:
Capturing output representations
================================
This example demonstrates how the configuration ``capture_repr``
(:ref:`capture_repr`) works. The default ``capture_repr`` setting is
``capture_repr: ('_repr_html_', '__repr__')`` and was used when building the
Sphinx-Gallery documentation. The output that is captured with this setting
is demonstrated in this example. Differences in outputs that would be captured
with other ``capture_repr`` settings is also explained.
"""
#%%
# Nothing is captured for the code block below because no data is directed to
# standard output and the last statement is an assignment, not an expression.
# example 1
a = 2
b = 10
#%%
# If you did wish to capture the value of ``b``, you would need to use:
# example 2
a = 2
b = 10
b # this is an expression
#%%
# Sphinx-Gallery first attempts to capture the ``_repr_html_`` of ``b`` as this
# is the first 'representation' method in the ``capture_repr`` tuple. As this
# method does not exist for ``b``, Sphinx-Gallery moves on and tries to capture
# the ``__repr__`` method, which is second in the tuple. This does exist for
# ``b`` so it is captured and the output is seen above.
#
# A pandas dataframe is used in the code block below to provide an example of
# an expression with a ``_repr_html_`` method.
# example 3
import pandas as pd
df = pd.DataFrame(data = {'col1': [1, 2], 'col2': [3, 4]})
df
#%%
# The pandas dataframe ``df`` has both a ``__repr__`` and ``_repr_html_``
# method. As ``_repr_html_`` appears first in the ``capture_repr`` tuple, the
# ``_repr_html_`` is captured in preference to ``__repr__``.
#
# For the example below, there is data directed to standard output and the last
# statement is an expression.
# example 4
print('Hello world')
a + b
#%%
# Statsmodels tables should also be styled appropriately:
# example 5
import numpy as np
import statsmodels.iolib.table
statsmodels.iolib.table.SimpleTable(np.zeros((3, 3)))
#%%
# ``print()`` outputs to standard output, which is always captured. The
# string ``'Hello world'`` is thus captured. A 'representation' of the last
# expression is also captured. Again, since this expression ``a + b`` does not
# have a ``_repr_html_`` method, the ``__repr__`` method is captured.
#
# Matplotlib output
# ##################
#
# Matplotlib function calls generally return a Matplotlib object as well as
# outputting the figure. For code blocks where the last statement is a
# Matplotlib expression, a 'representation' of the object will be captured, as
# well as the plot. This is because Matplotlib objects have a ``__repr__``
# method and our ``capture_repr`` tuple contains ``__repr__``. Note that
# Matplotlib objects also have a ``__str__`` method.
#
# In the example below, ``matplotlib.pyplot.plot()`` returns a list of
# ``Line2D`` objects representing the plotted data and the ``__repr__`` of the
# list is captured as well as the figure:
import matplotlib.pyplot as plt
plt.plot([1,2,3])
#%%
# To avoid capturing the text representation, you can assign the last Matplotlib
# expression to a temporary variable:
_ = plt.plot([1,2,3])
#%%
# Alternatively, you can add ``plt.show()``, which does not return anything,
# to the end of the code block:
plt.plot([1,2,3])
plt.show()
#%%
# The ``capture_repr`` configuration
# ##################################
#
# The ``capture_repr`` configuration is ``('_repr_html_', '__repr__')`` by
# default. This directs Sphinx-Gallery to capture 'representations' of the last
# statement of a code block, if it is an expression. Sphinx-Gallery does
# this according to the order 'representations' appear in the tuple. With the
# default ``capture_repr`` setting, ``_repr_html_`` is attempted to be captured
# first. If this method does not exist, the ``__repr__`` method would be
# captured. If the ``__repr__`` also does not exist (unlikely for non-user
# defined objects), nothing would be captured. For example, if the
# configuration was set to ``'capture_repr': ('_repr_html_',)`` nothing would be
# captured for example 2 as ``b`` does not have a ``_repr_html_``.
# You can change the 'representations' in the ``capture_repr`` tuple to finely
# tune what is captured in your example ``.py`` files.
#
# To only capture data directed to standard output you can set ``capture_repr``
# to be an empty tuple: ``capture_repr: ()``. With this setting, only data
# directed to standard output is captured. For the examples above, output would
# only be captured for example 4. Although the last statement is an expression
# for examples 2, 3 and 4 no 'representation' of the last expression would be
# output. You would need to add ``print()`` to the last expression to capture
# a 'representation' of it. The empty tuple setting imitates the behaviour of
# Sphinx-Gallery prior to v0.5.0, when this configuration was introduced.
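#%%
# As a minimal sketch of how the setting discussed above is declared (assuming
# the usual Sphinx ``conf.py`` layout; the other keys are placeholders)::
#
#     sphinx_gallery_conf = {
#         # ... other Sphinx-Gallery options ...
#         'capture_repr': ('_repr_html_', '__repr__'),  # the default used here
#     }
#
# Replacing the tuple with ``()`` gives the "standard output only" behaviour
# described in the previous paragraph.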
| bsd-3-clause |
supriyagarg/pydatalab | google/datalab/data/_csv_file.py | 6 | 7141 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements usefule CSV utilities."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import next
from builtins import str as newstr
from builtins import range
from builtins import object
import csv
import os
import pandas as pd
import random
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import tempfile
import google.datalab.storage
import google.datalab.utils
_MAX_CSV_BYTES = 10000000
class CsvFile(object):
"""Represents a CSV file in GCS or locally with same schema.
"""
def __init__(self, path, delimiter=b','):
"""Initializes an instance of a Csv instance.
Args:
path: path of the Csv file.
delimiter: the separator used to parse a Csv line.
"""
self._path = path
self._delimiter = delimiter
@property
def path(self):
return self._path
@staticmethod
def _read_gcs_lines(path, max_lines=None):
return google.datalab.storage.Object.from_url(path).read_lines(max_lines)
@staticmethod
def _read_local_lines(path, max_lines=None):
lines = []
for line in open(path):
if max_lines is not None and len(lines) >= max_lines:
break
lines.append(line)
return lines
def _is_probably_categorical(self, column):
if newstr(column.dtype) != 'object':
# only string types (represented in DataFrame as object) can potentially be categorical
return False
if len(max(column, key=lambda p: len(newstr(p)))) > 100:
return False # value too long to be a category
if len(set(column)) > 100:
return False # too many unique values to be a category
return True
def browse(self, max_lines=None, headers=None):
"""Try reading specified number of lines from the CSV object.
Args:
max_lines: max number of lines to read. If None, the whole file is read
headers: a list of strings as column names. If None, it will use "col0, col1..."
Returns:
A pandas DataFrame with the schema inferred from the data.
Raises:
Exception if the csv object cannot be read or not enough lines to read, or the
headers size does not match columns size.
"""
if self.path.startswith('gs://'):
lines = CsvFile._read_gcs_lines(self.path, max_lines)
else:
lines = CsvFile._read_local_lines(self.path, max_lines)
if len(lines) == 0:
return pd.DataFrame(columns=headers)
columns_size = len(next(csv.reader([lines[0]], delimiter=self._delimiter)))
if headers is None:
headers = ['col' + newstr(e) for e in range(columns_size)]
if len(headers) != columns_size:
      raise Exception('Number of columns in CSV does not match number of headers')
buf = StringIO()
for line in lines:
buf.write(line)
buf.write('\n')
buf.seek(0)
df = pd.read_csv(buf, names=headers, delimiter=self._delimiter)
for key, col in df.iteritems():
if self._is_probably_categorical(col):
df[key] = df[key].astype('category')
return df
def _create_external_data_source(self, skip_header_rows):
import google.datalab.bigquery as bq
df = self.browse(1, None)
# read each column as STRING because we only want to sample rows.
schema_train = bq.Schema([{'name': name, 'type': 'STRING'} for name in df.keys()])
options = bq.CSVOptions(skip_leading_rows=(1 if skip_header_rows is True else 0))
return bq.ExternalDataSource(self.path,
csv_options=options,
schema=schema_train,
max_bad_records=0)
def _get_gcs_csv_row_count(self, external_data_source):
import google.datalab.bigquery as bq
results = bq.Query('SELECT count(*) from data',
data_sources={'data': external_data_source}).execute().result()
return results[0].values()[0]
def sample_to(self, count, skip_header_rows, strategy, target):
"""Sample rows from GCS or local file and save results to target file.
Args:
count: number of rows to sample. If strategy is "BIGQUERY", it is used as approximate number.
skip_header_rows: whether to skip first row when reading from source.
strategy: can be "LOCAL" or "BIGQUERY". If local, the sampling happens in local memory,
and number of resulting rows matches count. If BigQuery, sampling is done
with BigQuery in cloud, and the number of resulting rows will be approximated to
count.
target: The target file path, can be GCS or local path.
Raises:
Exception if strategy is "BIGQUERY" but source is not a GCS path.
"""
if sys.version_info.major > 2:
xrange = range # for python 3 compatibility
# TODO(qimingj) Add unit test
# Read data from source into DataFrame.
if strategy == 'BIGQUERY':
import google.datalab.bigquery as bq
if not self.path.startswith('gs://'):
raise Exception('Cannot use BIGQUERY if data is not in GCS')
external_data_source = self._create_external_data_source(skip_header_rows)
row_count = self._get_gcs_csv_row_count(external_data_source)
query = bq.Query('SELECT * from data', data_sources={'data': external_data_source})
sampling = bq.Sampling.random(count * 100 / float(row_count))
sample = query.sample(sampling=sampling)
df = sample.to_dataframe()
elif strategy == 'LOCAL':
local_file = self.path
if self.path.startswith('gs://'):
local_file = tempfile.mktemp()
google.datalab.utils.gcs_copy_file(self.path, local_file)
with open(local_file) as f:
row_count = sum(1 for line in f)
start_row = 1 if skip_header_rows is True else 0
skip_count = row_count - count - 1 if skip_header_rows is True else row_count - count
skip = sorted(random.sample(xrange(start_row, row_count), skip_count))
header_row = 0 if skip_header_rows is True else None
df = pd.read_csv(local_file, skiprows=skip, header=header_row, delimiter=self._delimiter)
if self.path.startswith('gs://'):
os.remove(local_file)
else:
raise Exception('strategy must be BIGQUERY or LOCAL')
# Write to target.
if target.startswith('gs://'):
with tempfile.NamedTemporaryFile() as f:
df.to_csv(f, header=False, index=False)
f.flush()
google.datalab.utils.gcs_copy_file(f.name, target)
else:
with open(target, 'w') as f:
df.to_csv(f, header=False, index=False, sep=str(self._delimiter))
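# Illustrative usage of the class above (the paths are hypothetical):
#   csv_file = CsvFile('gs://my-bucket/data.csv')
#   df = csv_file.browse(max_lines=100)               # peek at the first rows
#   csv_file.sample_to(1000, skip_header_rows=True,
#                      strategy='LOCAL', target='/tmp/sample.csv')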
| apache-2.0 |
nok/sklearn-porter | tests/estimator/classifier/SVC/SVCCTest.py | 1 | 2445 | # -*- coding: utf-8 -*-
from unittest import TestCase
import numpy as np
from sklearn.svm.classes import SVC
from tests.estimator.classifier.Classifier import Classifier
from tests.language.C import C
class SVCCTest(C, Classifier, TestCase):
def setUp(self):
super(SVCCTest, self).setUp()
self.estimator = SVC(C=1., kernel='rbf',
gamma=0.001, random_state=0)
def tearDown(self):
super(SVCCTest, self).tearDown()
def test_linear_kernel(self):
self.estimator = SVC(C=1., kernel='linear',
gamma=0.001, random_state=0)
self.load_iris_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
preds, ground_truth = [], []
for _ in range(self.TEST_N_RANDOM_FEATURE_SETS):
x = np.random.uniform(amin, amax, self.n_features)
preds.append(self.pred_in_custom(x))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
def test_sigmoid_kernel(self):
self.estimator = SVC(C=1., kernel='sigmoid',
gamma=0.001, random_state=0)
self.load_iris_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
preds, ground_truth = [], []
for _ in range(self.TEST_N_RANDOM_FEATURE_SETS):
x = np.random.uniform(amin, amax, self.n_features)
preds.append(self.pred_in_custom(x))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
def test_auto_gamma(self):
self.estimator = SVC(C=1., gamma='auto', random_state=0)
self.load_iris_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
preds, ground_truth = [], []
for _ in range(self.TEST_N_RANDOM_FEATURE_SETS):
x = np.random.uniform(amin, amax, self.n_features)
preds.append(self.pred_in_custom(x))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
| mit |
lavizhao/sentiment | regression.py | 1 | 5831 | # coding: utf-8
'''
Run a pass of linear regression, then normalise and check the effect.
'''
import csv
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import linear_model
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import SVR
from scipy import sparse
def ct(sent,sym):
if sent.count(sym)!=0:
return 1
else:
return 0
def find_rules(sent):
"""
"""
tcount = []
tcount.append(ct(sent,"!"))
tcount.append(ct(sent,":)"))
tcount.append(ct(sent,":("))
tcount.append(ct(sent,"#"))
tcount.append(ct(sent,"was"))
tcount.append(ct(sent,"!"))
tcount.append(ct(sent,":-]"))
tcount.append(ct(sent,"%"))
tcount.append(ct(sent,"=_="))
tcount.append(ct(sent,"(:"))
tcount.append(ct(sent,"?"))
tcount.append(ct(sent,":D"))
tcount.append(ct(sent,"tommoro"))
tcount.append(1.0*len(sent)/100)
tcount.append(ct(sent,":"))
tcount.append(ct(sent,"{link}"))
tcount.append(ct(sent,";)"))
tcount.append(ct(sent,"="))
tcount.append(ct(sent,":-P"))
return tcount
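# find_rules() builds a fixed-length, hand-crafted feature vector: 0/1 presence
# flags for punctuation, emoticons and selected tokens, plus len(sent)/100 as a
# rough length feature. For the hypothetical tweet "so happy :) #sunny!" it would
# switch on the "!", ":)", "#" and ":" flags (the "!" check appears twice) and
# leave the other indicators at 0, with 0.19 as the length feature.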
def read_csv():
f = open("train.csv","U")
reader = csv.reader(f)
train,label = [],[]
etrain = []
etest = []
a = 0
for row in reader:
if a == 0:
a = a + 1
else:
train.append(row[1]+" "+row[2]+" "+row[3])
sub_row = row[4:]
sub_row = [float(i) for i in sub_row]
label.append(sub_row)
etrain.append(find_rules(row[1]))
f.close()
f = open("test.csv","U")
reader = csv.reader(f)
test,ans = [],[]
a = 0
for row in reader:
if a == 0:
a = a + 1
else:
ans.append(int(row[0]))
test.append(row[1])
etest.append(find_rules(row[1]))
f.close()
return train,label,test,ans,etrain,etest
def remain(a,n):
    # sort indices by value (smallest first), mirroring remain2, so the
    # "zero out the n smallest entries" loop below has a defined `ind`
    ind = range(len(a))
    ind.sort(lambda x,y:cmp(a[x],a[y]))
    mn = 0.001
for i in range(len(a)) :
if a[i] < mn:
a[i] = 0.0
for i in range(n):
a[ind[i]] = 0
a = a + abs(a.min())
    a = 1.0*a/np.sum(a)
return a
def remain2(a,n):
ind = range(len(a))
ind.sort(lambda x,y:cmp(a[x],a[y]))
mn = 0.001
for i in range(len(a)) :
if a[i] < mn:
a[i] = 0.0
for i in range(n):
a[ind[i]] = 0
a = a + abs(a.min())
a = 1.0*a/np.sum(a)
return a
def remain3(a,n):
for i in range(len(a)):
if a[i]<0.1:
a[i] = 0
elif a[i] >0.9:
a[i] = 1
if np.sum(a) < 1:
a = a/np.sum(a)
return a
def readtopic():
"""
"""
train,test = [],[]
print "读topic"
f1 = open("topic_train.txt")
for line in f1.readlines():
sp = line.split()
sp = [float(j) for j in sp]
train.append(sp)
f2 = open("topic_test.txt")
for line in f2.readlines():
sp = line.split()
sp = [float(j) for j in sp]
test.append(sp)
return train,test
if __name__ == "__main__":
print "读文件"
train,label,test,ans,etrain,etest = read_csv()
print "读主题"
topic_train,topic_test = readtopic()
#vectorizer = TfidfVectorizer(max_features=None,min_df=3,max_df=1.0,sublinear_tf=True,ngram_range=(1,2),smooth_idf=True,token_pattern=r'\w{1,}',analyzer='word',strip_accents='unicode',use_idf=False,binary=True)
vectorizer = TfidfVectorizer(max_features=None,min_df=10,max_df=1.0,sublinear_tf=True,ngram_range=(1,2),smooth_idf=True,token_pattern=r'\w{1,}',analyzer='word',strip_accents='unicode',use_idf=True,binary=False)
#vectorizer = TfidfVectorizer(max_features=None,min_df=10,max_df=1.0,sublinear_tf=True,ngram_range=(1,2),smooth_idf=True,analyzer='word',strip_accents='unicode',use_idf=True,binary=False)
#vectorizer = CountVectorizer(max_features=None,min_df=10,max_df=1.0,ngram_range=(1,2),token_pattern=r'\w{1,}',analyzer='word',strip_accents='unicode',binary=False)
length_train = len(train)
x_all = train + test
print "转化成tf-idf矩阵"
x_all = vectorizer.fit_transform(x_all)
x = x_all[:length_train]
t = x_all[length_train:]
label = np.array(label)
length_test = len(test)
n = label.shape[1]
print "x shape",x.shape
print "t shape",t.shape
print "合并"
x = sparse.hstack((x,etrain,topic_train)).tocsr()
t = sparse.hstack((t,etest,topic_test)).tocsr()
print "x shape",x.shape
print "t shape",t.shape
    # build the result matrix
answer = []
print "开始回归"
clf = linear_model.Ridge(alpha=1.2,fit_intercept=True,normalize=True,tol=1e-9)
for i in range(n):
print "第%s个"%(i)
clf.fit(x,label[:,i])
temp_answer = clf.predict(t)
answer.append(temp_answer)
answer = np.array(answer)
answer = answer.T
print answer.shape
print "归一化"
s = answer[:,0:5]
w = answer[:,5:9]
k = answer[:,9:24]
print "s shape",s.shape
print "w shape",w.shape
print "k shpae",k.shape
#s = s/np.mean(s,axis=1)
#w = w/np.mean(w,axis=1)
#k = k/np.mean(k,axis=1)
print "写入文件"
head = "id,s1,s2,s3,s4,s5,w1,w2,w3,w4,k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15"
f = open("ans_regression1.csv","w")
f.write(head+"\n")
for i in xrange(len(test)):
ts,tw,tk = s[i],w[i],k[i]
ts = remain(ts,0)
tw = remain2(tw,0)
tk = remain3(tk,13)
str_s = [str(j) for j in ts]
str_w = [str(j) for j in tw]
str_k = [str(j) for j in tk]
str_s = ','.join(str_s)
str_w = ','.join(str_w)
str_k = ','.join(str_k)
f.write("%s,%s,%s,%s\n"%(ans[i],str_s,str_w,str_k))
| apache-2.0 |
yaukwankiu/armor | defaultParameters.py | 1 | 11354 | # -*- coding: utf-8 -*-
####################################
# imports
import time, datetime
import platform
####################################
# computer settings
computer = 'asus-laptop'
if platform.node() == 'k-Aspire-E1-571G' or platform.node()=='yan-Aspire-E1-571G':
computer = 'acer-laptop'
elif platform.node() == 'k-801':
computer = 'k-801' # 801-desktop
elif platform.node() == 'k-acer':
computer = 'k-acer' # ubuntu 12.04/Win7 acer laptop
elif platform.node() == 'zxc-Aspire-E1-571G':
computer = 'k-acer' # ubuntu 12.04/Win7 acer laptop
elif platform.node()== 'Qoo-PC':
computer = 'Qoo-PC'
elif platform.node() =='user-PC': #2014-09-10
computer = 'user-PC'
#computer = 'i5-desktop'
print "computer: ", computer
defaultInputFolder = "../data_temp/"
defaultOutputFolder = "testing/"
defaultOutputFolderForImages = defaultOutputFolder
defaultImageFolder = defaultOutputFolderForImages #alias
defaultDatabase = "" #not used yet
defaultImageExtension = '.png' # added 2013-09-27
defaultImageTopDown = False
if computer == 'acer-laptop':
#usbDriveName = 'k/KINGSTON'
#externalHardDriveName = 'k/Seagate Expansion Drive'
#externalHardDriveName2= 'k/A4ECB939ECB90718'
#hardDriveName = 'DATA'
usbDriveName = 'TOSHIBA EXT'
externalHardDriveName = 'TOSHIBA EXT'
externalHardDriveName2= 'TOSHIBA EXT'
hardDriveName = 'DATA'
elif computer == 'k-801':
usbDriveName = 'k/KINGSTON'
externalHardDriveName = 'k/FreeAgent Drive' #?!
externalHardDriveName2= 'k/A4ECB939ECB90718'
hardDriveName = '../home/k'
elif computer == 'k-acer' or computer=='user-PC': #acer- ubuntu - main platform 2014-02-19
#usbDriveName = 'KINGSTON' #user-PC : 2014-09-10
#externalHardDriveName = 'FreeAgent Drive' #?!
#externalHardDriveName2= 'A4ECB939ECB90718'
#hardDriveName = 'home/k'
#hardDriveName2 = 'data'
usbDriveName = 'TOSHIBA EXT'
externalHardDriveName = 'TOSHIBA EXT'
externalHardDriveName2= 'TOSHIBA EXT'
#hardDriveName = '051798CD6AAE5EEF'
hardDriveName = 'media/TOSHIBA EXT'
else:
usbDriveName = 'KINGSTON'
externalHardDriveName = 'Seagate Expansion Drive'
externalHardDriveName2= 'A4ECB939ECB90718'
hardDriveName = 'host'
if computer == 'acer-laptop':
usbDriveLetter ='H'
externalHardDriveLetter = 'G'
hardDriveLetter = 'D'
elif computer == 'i5-desktop':
usbDriveLetter ='G'
externalHardDriveLetter = 'D'
hardDriveLetter = 'C'
elif computer == 'asus-laptop':
usbDriveLetter ='I' #forgot
externalHardDriveLetter = 'G' #forgot
hardDriveLetter = 'D'
elif computer == 'k-801':
usbDriveLetter ='I' #forgot
externalHardDriveLetter = 'G' #forgot
hardDriveLetter = 'D' #FORGOT
elif computer == 'k-acer':
usbDriveLetter ='I' #don't know yet
externalHardDriveLetter = 'G' #don't know yet
hardDriveLetter = 'D' #don't know yet
elif computer == 'user-PC': #2014-09-10
usbDriveLetter ='F' #don't know yet
externalHardDriveLetter = 'F' #don't know yet
hardDriveLetter = 'F' #don't know yet
hardDriveLetter2 = 'F' #don't know yet
elif computer =='Qoo-PC':
usbDriveLetter ='D'
externalHardDriveLetter = 'D'
hardDriveLetter = 'D'
hardDriveLetter2 = 'D'
################################
# need to check the following
#elif computer == 'k-801':
# usbDriveLetter ='I' #forgot
# externalHardDriveLetter = 'G' #forgot
# hardDriveLetter = 'D'
import os
if computer == 'acer-laptop' and os.getcwd() == '/home/k/ARMOR/python':
usbRoot = '/home/k/ARMOR/python'
externalHardDriveRoot = '/home/k/ARMOR/python'
externalHardDriveRoot2 = '/home/k/ARMOR/python'
hardDriveRoot = '/home/k/ARMOR/python'
elif os.sep == "/":
usbRoot = '/media/%s/ARMOR/' % usbDriveName
externalHardDriveRoot = '/media/%s/ARMOR/' % externalHardDriveName
externalHardDriveRoot2 = '/media/%s/ARMOR/' % externalHardDriveName2
hardDriveRoot = '/%s/ARMOR/' % hardDriveName
else:
usbRoot = '%s:/ARMOR/' % usbDriveLetter
externalHardDriveRoot = '%s:/ARMOR/' % externalHardDriveLetter
externalHardDriveRoot2 = '%s:/ARMOR/' % hardDriveLetter2 # don't know this yet
hardDriveRoot = '%s:/ARMOR/' % hardDriveLetter
defaultRootFolder = usbRoot # can choose (= .../ARMOR/)
defaultRoot = defaultRootFolder #alias
root = defaultRootFolder #alias
rootFolder = defaultRootFolder #alias
defaultLabReportsFolder = defaultRootFolder + 'labReports/'
defaultLabReportFolder = defaultLabReportsFolder #alias
defaultLabLogsFolder = 'labLogs/'
defaultLabLogFolder = defaultLabLogsFolder #alias
defaultTestScriptFolder = defaultRootFolder+ 'python/armor/tests/'
testFolder = defaultTestScriptFolder
defaultCWBfolder = defaultRootFolder + '../CWB/'
CWBfolder = defaultCWBfolder #alias
defaultImageDataFolder = defaultCWBfolder # maybe alias, maybe not, it's a local setting
if computer=='acer-laptop':
if os.path.exists('/media/TOURO S/CWB/'):
defaultImageDataFolder = '/media/TOURO S/CWB/'
else:
print "TOURO S-drive not found!"
defaultImageDataFolder = root + '../CWB/'
else:
defaultImageDataFolder = root + '../CWB/'
################################################################
# geography
# taichung park coordinates (24.145056°N 120.683329°E)
#defaultTaiwanReliefDataFolder = defaultRootFolder+'armor/taiwanReliefData/'
defaultTaiwanReliefDataFolder = defaultRootFolder+'data/taiwanRelief881/' #2014-06-12
defaultTaiwanReliefDataFolder881 = defaultRootFolder+'data/taiwanRelief881/' #2014-06-12
defaultTaiwanReliefDataFolder150 = defaultRootFolder+'data/taiwanRelief150/' #2014-06-12
taichungParkLatitude = 24.145056
taichungParkLongitude= 120.683329
# 2014-01-19
taipeiLatitude = 25.0333 # google
taipeiLongitude = 121.6333
taipei = (taipeiLatitude, taipeiLongitude)
tainanLatitude = 22.9833
tainanLongitude = 120.1833
tainan = ( tainanLatitude, tainanLongitude)
kentingLatitude = 21.9800
kentingLongitude = 120.7970
kenting = (kentingLatitude, kentingLongitude )
hualienLatitude = 23.9722
hualienLongitude = 121.6064
hualien = (hualienLatitude ,hualienLongitude )
taitungLatitude = 22.7583
taitungLongitude = 121.1444
taitung = ( taitungLatitude,taitungLongitude )
# from armor.tests.projectProposal20140119
taipeiCounty = (530, 500, 60, 60)
taichungCounty = (475, 435, 40, 80)
tainanCounty = (390, 400, 40, 50)
kaohsiungCounty = (360, 410, 70, 70)
yilanCounty = (500, 500, 50, 50)
hualienCounty = (410, 480, 100, 60)
taitungCounty = (335, 460, 100, 60)
kenting = (319, 464)
sector7 = (464, 504, 96, 60)
sector2 = (428, 396, 96, 108)
sector1 = (500, 432, 96, 120)
allReg = (300, 100, 500, 600)
##############################################################
# input image info
# ..........................................................
# COMPREF
defaultHeight = 881
defaultWidth = 921
defaultLowerLeftCornerLatitudeLongitude = (18., 115.) # p.25, https://docs.google.com/file/d/0B84wEiWytQMwemhzX3JkQ1dSaTA/edit
defaultUpperRightCornerLatitudeLongitude = (29., 126.5) # p.25, https://docs.google.com/file/d/0B84wEiWytQMwemhzX3JkQ1dSaTA/edit
#missingDataThreshold = -150. # anything below " missingDataThreshold is marked as "masked"
defaultMissingDataThreshold = -150. # anything below " missingDataThreshold is marked as "masked" # this line replaces the line above 2014-02-20
missingDataThreshold = defaultMissingDataThreshold # this line is for backward compatibility if any
#defaultThreshold = missingDataThreshold
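# Grid-spacing note (derived from the COMPREF numbers above, assuming the corner
# coordinates are themselves grid points): (29. - 18.) / (881 - 1) and
# (126.5 - 115.) / (921 - 1) both equal 0.0125 degrees per cell, so the grid
# spacing is uniform in latitude and longitude.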
# ..........................................................
# WRF
"""
defaultHeight = 881
defaultWidth = 921
defaultLowerLeftCornerLatitudeLongitude = (18., 115.) # p.25, https://docs.google.com/file/d/0B84wEiWytQMwemhzX3JkQ1dSaTA/edit
defaultUpperRightCornerLatitudeLongitude = (29., 126.5) # p.25, https://docs.google.com/file/d/0B84wEiWytQMwemhzX3JkQ1dSaTA/edit
#missingDataThreshold = -150. # anything below " missingDataThreshold is marked as "masked"
defaultMissingDataThreshold = -150. # anything below " missingDataThreshold is marked as "masked" # this line replaces the line above 2014-02-20
missingDataThreshold = defaultMissingDataThreshold # this line is for backward compatibility if any
#defaultThreshold = missingDataThreshold
"""
defaultWRFHeight = 150
defaultWRFWidth = 140
COMPREF2WRFwindow = (200,200,600,560)
defaultWRFLowerLeftCornerLatitudeLongitude = (20., 117.5)
#defaultWRFUpperRightCornerLatitudeLongitude = (28., 124.5)
defaultWRFUpperRightCornerLatitudeLongitude = (27.-0.05, 124.5-0.05) #2014-06-13
LL = defaultWRFLowerLeftCornerLatitudeLongitude #alias #2014-06-02
UR = defaultWRFUpperRightCornerLatitudeLongitude #alias
################################################################
# plotting
from . import colourbar
defaultCmap = colourbar.cmap
coloursList = ['b','c','g','y','r','m','k',] * 30 #http://matplotlib.org/api/colors_api.html
################################################################
# CWB "chart2" colour bar information - to be used in back-converting downloaded images to data
# data obtained through analysis by armor/tests/imageToData_chartsTest2.py
chart2ColourBar = {
273: [209, 200, 227],
253: [255, 255, 255],
238: [150, 0, 245],
219: [255, 0, 255],
206: [152, 0, 0],
193: [200, 0, 0],
175: [251, 2, 0],
160: [251, 121, 1],
145: [255, 199, 4],
130: [255, 253, 0],
115: [ 2, 149, 0],
100: [ 1, 200, 0],
80: [ 3, 252, 12],
67: [ 2, 1, 253],
52: [ 0, 150, 254],
34: [ 0, 255, 252],
19: [175, 175, 175],
}
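# Example reverse lookup (illustrative): a pixel coloured [255, 253, 0] (bright
# yellow) on a downloaded CWB "chart2" image maps back to the data value 130 in
# the table above; this is the lookup the image-to-data scripts rely on.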
################################################################
# filenames
defaultImageSuffix = ".png"
#defaultDataSuffix1 = ".txt"
#defaultDataSuffix2 = ".dat"
###########################################################
# parameters for analyses
defaultMinComponentSize=100 # armor/tests/imageToData11.py , armor/pattern.py localShapeFeatures()
################################################################
# misc
defaultTimeString = str(int(time.time()))
localtime = time.localtime()
year = localtime.tm_year
month = localtime.tm_mon
day = localtime.tm_mday
hour = localtime.tm_hour
minute = localtime.tm_min
second = localtime.tm_sec
| cc0-1.0 |
stoqs/stoqs | stoqs/contrib/analysis/drift_data.py | 3 | 21334 | #!/usr/bin/env python
__author__ = "Mike McCann"
__copyright__ = "Copyright 2014, MBARI"
__license__ = "GPL"
__maintainer__ = "Mike McCann"
__email__ = "mccann at mbari.org"
__status__ = "Development"
__doc__ = '''
Script to produce products (plots, kml, etc.) to help understand drifting data.
- Make progressive vector diagram from moored ADCP data (read from STOQS)
- Plot drogued drifter, ship, and other data (read from Tracking DB)
- Plot sensor data (read from STOQS)
Output as a .png map, .kml file, or ...
Mike McCann
MBARI 22 September 2014
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@author: __author__
@status: __status__
@license: __license__
'''
import os
import sys
project_dir = os.path.join(os.path.dirname(__file__), "../../") # settings.py is two dirs up
sys.path.insert(0, project_dir)
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'config.settings.local'
import django
django.setup()
import csv
import logging
import time
import pyproj
import requests
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytz
from datetime import datetime
from collections import defaultdict
from contextlib import closing
from django.conf import settings
from stoqs.models import MeasuredParameter, NominalLocation, ActivityParameter
from django.http import HttpRequest
from utils.Viz.plotting import readCLT
from utils.Viz.KML import KML
from mpl_toolkits.basemap import Basemap
logger = logging.getLogger(__name__)  # module-level logger used by the data-loading error paths
class Drift():
'''Data and methods to support drift data product preparation
'''
trackDrift = defaultdict(lambda: {'es': [], 'lon': [], 'lat': []}) # To be keyed by platform name
adcpDrift = defaultdict(lambda: {'es': [], 'lon': [], 'lat': []}) # To be keyed by depth
# Not to be confused with Stokes Drift - To be keyed by parameter,platform,min,max
stoqsDrift = defaultdict(lambda: {'es': [], 'lon': [], 'lat': [], 'depth': [], 'datavalue':[]})
def loadTrackingData(self):
'''Fill up trackDrift dictionary
'''
for url in self.args.trackData:
# Careful - trackingdb returns the records in reverse time order
if self.args.verbose:
print('Opening', url)
with closing(requests.get(url, stream=True)) as resp:
if resp.status_code != 200:
logger.error('Cannot read %s, resp.status_code = %s', url, resp.status_code)
return
r_decoded = (line.decode('utf-8') for line in resp.iter_lines())
for r in csv.DictReader(r_decoded):
# Use logic to skip inserting values if one or the other or both start and end are specified
if self.startDatetime:
if datetime.utcfromtimestamp(float(r['epochSeconds'])) < self.startDatetime:
continue
if self.endDatetime:
if datetime.utcfromtimestamp(float(r['epochSeconds'])) > self.endDatetime:
continue
self.trackDrift[r['platformName']]['es'].insert(0, float(r['epochSeconds']))
self.trackDrift[r['platformName']]['lat'].insert(0, float(r['latitude']))
self.trackDrift[r['platformName']]['lon'].insert(0, float(r['longitude']))
def computeADCPDrift(self):
'''Read data from database and put computed progressive vectors into adcpDrift dictionary
'''
if self.args.adcpPlatform:
adcpQS = MeasuredParameter.objects.using(self.args.database).filter(
measurement__instantpoint__activity__platform__name=self.args.adcpPlatform)
if self.startDatetime:
adcpQS = adcpQS.filter(measurement__instantpoint__timevalue__gte=self.startDatetime)
if self.endDatetime:
adcpQS = adcpQS.filter(measurement__instantpoint__timevalue__lte=self.endDatetime)
if self.args.adcpMinDepth:
adcpQS = adcpQS.filter(measurement__depth__gte=self.args.adcpMinDepth)
if self.args.adcpMaxDepth:
adcpQS = adcpQS.filter(measurement__depth__lte=self.args.adcpMaxDepth)
utd = adcpQS.filter(parameter__standard_name='eastward_sea_water_velocity').values_list(
'datavalue', 'measurement__instantpoint__timevalue', 'measurement__depth').order_by(
'measurement__depth', 'measurement__instantpoint__timevalue')
vtd = adcpQS.filter(parameter__standard_name='northward_sea_water_velocity').values_list(
'datavalue', 'measurement__instantpoint__timevalue', 'measurement__depth').order_by(
'measurement__depth', 'measurement__instantpoint__timevalue')
# Compute positions (progressive vectors) - horizontal displacement in meters
x = defaultdict(lambda: [])
y = defaultdict(lambda: [])
last_udiff = None
for i, ((u, ut, ud), (v, vt, vd)) in enumerate(zip(utd, vtd)):
try:
udiff = utd[i+1][1] - ut
vdiff = vtd[i+1][1] - vt
except IndexError as e:
# Extrapolate using last time difference, assuming it's regular and that we are at the last point, works only for very last point
udiff = last_udiff
vdiff = last_udiff
else:
last_udiff = udiff
if udiff != vdiff:
raise Exception('udiff != vdiff')
else:
dt = udiff.seconds + udiff.days * 24 * 3600
if dt < 0:
# For intermediate depths where (utd[i+1][1] - ut) is a diff with the time of the next depth
dt = last_dt
if ud != vd:
raise Exception('ud != vd')
else:
x[ud].append(u * dt / 100)
y[vd].append(v * dt / 100)
self.adcpDrift[ud]['es'].append(time.mktime(ut.timetuple()))
last_dt = dt
# Work in UTM space to add x & y offsets to begining position of the mooring
g0 = NominalLocation.objects.using(self.args.database).filter(activity__platform__name=self.args.adcpPlatform).values_list('geom')[0][0]
p = pyproj.Proj(proj='utm', zone=10, ellps='WGS84')
e0, n0 = p(g0.x, g0.y)
for depth in x:
eList = np.cumsum([e0] + x[depth])
nList = np.cumsum([n0] + y[depth])
lonList, latList = p(eList, nList, inverse=True)
self.adcpDrift[depth]['lon'] = lonList
self.adcpDrift[depth]['lat'] = latList
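    # Progressive-vector note for the method above: each velocity sample (assumed
    # cm/s, hence the division by 100) is converted to a displacement in metres
    # via u * dt / 100, the displacements are cumulatively summed in UTM zone 10
    # (WGS84) coordinates starting from the mooring's nominal position, and the
    # running positions are projected back to longitude/latitude.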
def loadSTOQSData(self):
'''Fill up stoqsDrift dictionary with platform_parameter as key
'''
for url in self.args.stoqsData:
# Careful - trackingdb returns the records in reverse time order
if self.args.verbose:
print('Opening', url)
with closing(requests.get(url.replace(' ', '%20'), stream=True)) as resp:
if resp.status_code != 200:
logger.error('Cannot read %s, resp.status_code = %s', url, resp.status_code)
return
r_decoded = (line.decode('utf-8') for line in resp.iter_lines())
for r in csv.DictReader(r_decoded):
# Use logic to skip inserting values if one or the other or both start and end are specified
dt = datetime.strptime(r['measurement__instantpoint__timevalue'], '%Y-%m-%d %H:%M:%S')
if self.startDatetime:
if dt < self.startDatetime:
continue
if self.endDatetime:
if dt > self.endDatetime:
continue
if self.args.verbose > 1:
print(r)
apQS = ActivityParameter.objects.using(self.args.database).filter(
activity__name=r['measurement__instantpoint__activity__name'],
parameter__name=r['parameter__name'])
# Mash together a key composed of parameter, platform, min, max for the Activity
key = "%s,%s,%f,%f" % ( r['parameter__name'], r['measurement__instantpoint__activity__platform__name'],
apQS[0].p025, apQS[0].p975 )
self.stoqsDrift[key]['es'].append(time.mktime(dt.timetuple()))
lon, lat = r['measurement__geom'].split('(')[-1].split(')')[0].split(' ')
self.stoqsDrift[key]['lat'].append(float(lat))
self.stoqsDrift[key]['lon'].append(float(lon))
self.stoqsDrift[key]['depth'].append(float(r['measurement__depth']))
self.stoqsDrift[key]['datavalue'].append(r['datavalue'])
def process(self):
'''Read in data and build structures that we can generate products from
'''
if self.args.trackData:
self.loadTrackingData()
if self.args.adcpPlatform:
self.computeADCPDrift()
if self.args.stoqsData:
self.loadSTOQSData()
def getExtent(self):
'''For all data members find the min and max latitude and longitude
'''
if self.args.extent:
return [float(e) for e in self.args.extent]
else:
lonMin = 180
lonMax = -180
latMin = 90
latMax = -90
for drift in (self.trackDrift, self.adcpDrift, self.stoqsDrift):
for k,v in list(drift.items()):
if np.min(v['lon']) < lonMin:
lonMin = np.min(v['lon'])
if np.max(v['lon']) > lonMax:
lonMax = np.max(v['lon'])
if np.min(v['lat']) < latMin:
latMin = np.min(v['lat'])
if np.max(v['lat']) > latMax:
latMax = np.max(v['lat'])
# Expand the computed extent by extendDeg degrees
extendDeg = self.args.extend
return lonMin - extendDeg, latMin - extendDeg, lonMax + extendDeg, latMax + extendDeg
def createPNG(self, fileName=None, forGeotiff=False):
'''Draw processed data on a map and save it as a .png file
'''
if not forGeotiff:
fig = plt.figure(figsize=(18, 12))
ax = plt.axes()
else:
fig = plt.figure()
ax = fig.add_axes((0,0,1,1))
if not fileName:
fileName = self.args.pngFileName
e = self.getExtent()
m = Basemap(llcrnrlon=e[0], llcrnrlat=e[1], urcrnrlon=e[2], urcrnrlat=e[3], projection='cyl', resolution='l', ax=ax)
if not forGeotiff:
m.arcgisimage(server='http://services.arcgisonline.com/ArcGIS', service='Ocean_Basemap')
for depth, drift in list(self.adcpDrift.items()):
m.plot(drift['lon'], drift['lat'], '-', c='black', linewidth=1)
plt.text(drift['lon'][-1], drift['lat'][-1], '%i m' % depth, size='small')
for platform, drift in list(self.trackDrift.items()):
# Ad hoc coloring of platforms...
if platform.startswith('stella'):
color = 'yellow'
elif platform.startswith('daphne'):
color = 'orange'
elif platform.startswith('makai'):
color = 'magenta'
else:
color = 'red'
m.plot(drift['lon'], drift['lat'], '-', c=color, linewidth=2)
plt.text(drift['lon'][-1], drift['lat'][-1], platform, size='small')
# Plot each data point with it's own color based on the activity statistics from STOQS
coloredDotSize = 30
clt = readCLT(os.path.join(settings.ROOT_DIR('static'), 'colormaps', 'jetplus.txt'))
cm_jetplus = matplotlib.colors.ListedColormap(np.array(clt))
for key, drift in list(self.stoqsDrift.items()):
min, max = key.split(',')[2:4]
ax.scatter(drift['lon'], drift['lat'], c=drift['datavalue'], s=coloredDotSize, cmap=cm_jetplus, lw=0, vmin=min, vmax=max)
label = '%s from %s' % tuple(key.split(',')[:2])
plt.text(drift['lon'][-1], drift['lat'][-1], label, size='small')
nowLocal = str(pytz.utc.localize(datetime.now()).astimezone(pytz.timezone('America/Los_Angeles'))).split('.')[0]
plt.text(0.99, 0.01, 'Created: ' + nowLocal + ' Local', horizontalalignment='right', verticalalignment='bottom', transform=ax.transAxes)
if not forGeotiff:
m.drawparallels(np.linspace(e[1],e[3],num=3), labels=[True,False,False,False], linewidth=0)
m.drawmeridians(np.linspace(e[0],e[2],num=3), labels=[False,False,False,True], linewidth=0)
try:
plt.title(self.title)
except AttributeError:
pass
fig.savefig(fileName)
print('Wrote file', self.args.pngFileName)
else:
plt.axis('off')
try:
plt.text(0.5, 0.95, self.title, horizontalalignment='center', verticalalignment='top', transform=ax.transAxes)
except AttributeError:
pass
fig.savefig(fileName, transparent=True, dpi=300, bbox_inches='tight', pad_inches=0)
plt.clf()
plt.close()
def createGeoTiff(self):
'''Your image must be only the geoplot with no decorations like axis titles, axis labels, etc., and you
will need accurate upper-left and lower-right coordinates in EPSG:4326 projection, also known as WGS 84 projection,...
The syntax is pretty straightforward, something like the following will convert your image to the correct format:
gdal_translate <image.png> <image.tiff> -a_ullr -122.25 37.1 -121.57365 36.67558
There is also a python wrapper for the GDAL library
https://pypi.python.org/pypi/GDAL/
'''
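        # A hedged sketch of the same conversion through the GDAL Python
        # bindings mentioned in the docstring above (assumes the 'osgeo'
        # package from GDAL >= 2.1 is installed); left commented out so the
        # gdal_translate subprocess below remains the path that actually runs:
        #   from osgeo import gdal
        #   gdal.Translate(self.args.geotiffFileName,
        #                  self.args.geotiffFileName + '.png',
        #                  outputBounds=[e[0], e[3], e[2], e[1]])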
e = self.getExtent()
self.createPNG(self.args.geotiffFileName + '.png', forGeotiff=True)
cmd = 'gdal_translate %s %s -a_ullr %s %s %s %s' % (self.args.geotiffFileName + '.png',
self.args.geotiffFileName, e[0], e[3], e[2], e[1])
print("Executing:\n", cmd)
os.system(cmd)
os.remove(self.args.geotiffFileName + '.png')
print('Wrote file', self.args.geotiffFileName)
def createKML(self):
'''Reuse STOQS utils/Viz code to build some simple KML. Use 'position' for Parameter Name.
Fudge data value to distinguish platforms by color, use 0.0 for depth except for adcp data.
'''
request = HttpRequest()
qs = None
qparams = {}
stoqs_object_name = None
kml = KML(request, qs, qparams, stoqs_object_name, withTimeStamps=True, withLineStrings=True, withFullIconURL=True)
# Put data into form that KML() expects - use different datavalues (-1, 1) to color the platforms
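        # Each row appended to dataHash below is, by construction:
        #   [datetime, lon, lat, depth, parameter_name, datavalue, platform]
        # (noted here for reference, based on the append calls that follow)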
dataHash = defaultdict(lambda: [])
colors = {}
values = np.linspace(-1, 1, len(list(self.trackDrift.keys())))
for i, k in enumerate(self.trackDrift.keys()):
colors[k] = values[i]
for platform, drift in list(self.trackDrift.items()):
for es, lo, la in zip(drift['es'], drift['lon'], drift['lat']):
dataHash[platform].append([datetime.utcfromtimestamp(es), lo, la, 0.0, 'position', colors[platform], platform])
for depth, drift in list(self.adcpDrift.items()):
for es, lo, la in zip(drift['es'], drift['lon'], drift['lat']):
dataHash[depth].append([datetime.utcfromtimestamp(es), lo, la, float(depth), 'position', 0.0, 'adcp'])
for key, drift in list(self.stoqsDrift.items()):
parm, plat = key.split(',')[:2]
for es, lo, la, de, dv in zip(drift['es'], drift['lon'], drift['lat'], drift['depth'], drift['datavalue']):
dataHash[parm].append([datetime.utcfromtimestamp(es), lo, la, de, parm, dv, plat])
try:
title = self.title
except AttributeError:
title = 'Product of STOQS drift_data.py'
        description = self.commandline.replace('&', '&amp;')
kml = kml.makeKML(self.args.database, dataHash, 'position', title, description, 0.0, 0.0 )
fh = open(self.args.kmlFileName, 'w')
fh.write(kml)
fh.close()
print('Wrote file', self.args.kmlFileName)
def process_command_line(self):
'''The argparse library is included in Python 2.7 and is an added package for STOQS.
'''
import argparse
from argparse import RawTextHelpFormatter
examples = 'Examples:' + '\n\n'
examples += "M1 ADCP progressive vector diagram and Stella and Rachel Carson position data:\n"
examples += sys.argv[0] + " --database stoqs_september2014 --adcpPlatform M1_Mooring --adcpMinDepth 30 --adcpMaxDepth 40"
examples += " --trackData http://odss.mbari.org/trackingdb/position/stella101/between/20140922T171500/20141010T000000/data.csv"
examples += " http://odss.mbari.org/trackingdb/position/R_CARSON/between/20140922T171500/20141010T000000/data.csv"
examples += " http://odss.mbari.org/trackingdb/position/stella122/between/20140922T171500/20141010T000000/data.csv"
examples += " --pngFileName foo.png --start 20140923T180000 --end 20140925T150000"
examples += "\n"
examples += '\nIf running from cde-package replace ".py" with ".py.cde".'
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description='Script to produce products to help understand drift caused by currents in the ocean',
epilog=examples)
parser.add_argument('-d', '--database', action='store', help='Database alias', default='stoqs_september2014')
parser.add_argument('--adcpPlatform', action='store', help='STOQS Platform Name for ADCP data')
parser.add_argument('--adcpMinDepth', action='store', help='Minimum depth of ADCP data for progressive vector data', type=float)
parser.add_argument('--adcpMaxDepth', action='store', help='Maximum depth of ADCP data for progressive vector data', type=float)
parser.add_argument('--trackData', action='store', help='List of MBARItracking database .csv urls for data from drifters, ships, etc.', nargs='*', default=[])
parser.add_argument('--stoqsData', action='store', help='List of STOQS MeasuredParameter Data Access .csv urls for parameter data', nargs='*', default=[])
parser.add_argument('--start', action='store', help='Start time in YYYYMMDDTHHMMSS format')
parser.add_argument('--end', action='store', help='End time in YYYYMMDDTHHMMSS format')
parser.add_argument('--extend', action='store', help='Extend the data extent for the map boundaries by this value in degrees', default=0.05, type=float)
parser.add_argument('--extent', action='store', help='Space separated specific map boundary in degrees: ll_lon ll_lat ur_lon ur_lat', nargs=4, default=[])
parser.add_argument('--title', action='store', help='Title for plots, will override default title created if --start specified')
parser.add_argument('--kmlFileName', action='store', help='Name of file for KML output')
parser.add_argument('--pngFileName', action='store', help='Name of file for PNG image of map')
parser.add_argument('--geotiffFileName', action='store', help='Name of file for geotiff image of map')
parser.add_argument('-v', '--verbose', nargs='?', choices=[1,2,3], type=int, help='Turn on verbose output. Higher number = more output.', const=1)
self.args = parser.parse_args()
self.commandline = ' '.join(sys.argv)
self.startDatetime = None
        # Make both naive and timezone aware datetime data members
if self.args.start:
self.startDatetime = datetime.strptime(self.args.start, '%Y%m%dT%H%M%S')
self.startDatetimeUTC = pytz.utc.localize(self.startDatetime)
self.startDatetimeLocal = self.startDatetimeUTC.astimezone(pytz.timezone('America/Los_Angeles'))
self.title = 'Drift since %s' % self.startDatetimeLocal
self.endDatetime = None
if self.args.end:
self.endDatetime = datetime.strptime(self.args.end, '%Y%m%dT%H%M%S')
self.endDatetimeUTC = pytz.utc.localize(self.endDatetime)
self.endDatetimeLocal = self.endDatetimeUTC.astimezone(pytz.timezone('America/Los_Angeles'))
if self.args.title:
self.title = self.args.title
if __name__ == '__main__':
d = Drift()
d.process_command_line()
d.process()
if d.args.pngFileName:
d.createPNG()
if d.args.geotiffFileName:
d.createGeoTiff()
if d.args.kmlFileName:
d.createKML()
| gpl-3.0 |
walterreade/scikit-learn | examples/ensemble/plot_feature_transformation.py | 115 | 4327 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
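# A small sketch of the encoding described in the docstring above (shown as a
# comment only; `est` stands in for any of the fitted tree ensembles below):
#   leaves = est.apply(X)               # one leaf index per (sample, tree)
#   enc = OneHotEncoder().fit(leaves)
#   X_leaves = enc.transform(leaves)    # sparse one-hot indicator features
# For gradient boosting, est.apply(X) carries an extra class axis, hence the
# [:, :, 0] indexing used further down.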
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear model (logistic regression) to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
JohannesUIBK/oggm | oggm/sandbox/itmix/plot_submitted.py | 2 | 1067 | import glob
import os
import matplotlib.pyplot as plt
import salem
from .itmix import find_path
from .itmix_cfg import DATA_DIR, ITMIX_ODIR, PLOTS_DIR
pdir = os.path.join(PLOTS_DIR, 'submitted') + '/'
if not os.path.exists(pdir):
os.mkdir(pdir)
for dgn in glob.glob(os.path.join(ITMIX_ODIR, '*')):
gname = os.path.basename(dgn)
print(gname)
ifile = find_path(os.path.join(DATA_DIR, 'itmix', 'glaciers_sorted'),
'02_surface_' + gname + '*.asc')
ds = salem.EsriITMIX(ifile)
itmix_topo = ds.get_vardata()
ifiles = find_path(ITMIX_ODIR, '*' + gname + '*.asc', allow_more=True)
for ifile in ifiles:
ds2 = salem.EsriITMIX(ifile)
oggm_topo = ds2.get_vardata()
thick = itmix_topo - oggm_topo
cm = salem.Map(ds.grid)
cm.set_plot_params(nlevels=256)
cm.set_cmap(plt.get_cmap('viridis'))
cm.set_data(thick)
cm.visualize()
pname = os.path.basename(ifile).split('.')[0]
plt.savefig(os.path.join(pdir, pname) + '.png')
plt.close() | gpl-3.0 |
brianlorenz/COSMOS_IMACS_Redshifts | Emission_Fitting/Find_Avs.py | 1 | 8407 | #Plots the Av magnitude due to the balmer decerment
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux.txt'
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#The location to store the scale and its stddev of each line
qualdatapath = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
#USIG ERRORS
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/errs.txt'
#Read in the scale of the lines
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
'''
#BIWT DUP ERRORS
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/biwt_dup.txt'
#Read in the scale of the lines
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
'''
#File to write the Av array to
dataout = '/Users/blorenz/COSMOS/COSMOSData/balmer_avs.txt'
#The location of the muzzin et al data:
mdatapath = '/Users/blorenz/COSMOS/muzzin_data/UVISTA_final_colors_sfrs_v4.1.dat'
#Read in the muzzin data
mdata = ascii.read(mdatapath).to_pandas()
mdata = mdata.rename(columns={'ID':'OBJID'})
fluxdata = pd.merge(fluxdata,mdata)
#Fontsizes for plotting
axisfont = 18
ticksize = 16
titlefont = 24
legendfont = 16
textfont = 16
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
lines = ['6563_fix','4861','4340']
#Create the figure
fig,axarr = plt.subplots(3,3,figsize=(25,22))
#Plotting parameters
ms = 3
lw=0.5
mark='o'
#Set the Rv value - this one is taken from Calzetti et al. (2000)
Rv = 4.05 #+-0.80
#Reddening law from Calzetti et al (2000)
def Calzetti_k(wave):
waveum = wave*.0001
if ((waveum >= 0.63) and (waveum <= 2.2)):
k = 2.659*(-1.857+divz(1.040,waveum))+Rv
if ((waveum < 0.63) and (waveum >= 0.12)):
k = 2.659*(-2.156+divz(1.509,waveum)-divz(0.198,waveum**2)+divz(0.011,waveum**3))+Rv
return k
#Finds the ratio and errors in that ratio of any two lines
def getAv(pd_df,err_df,L1,L2,bdec):
#Calibrate the fluxes by dividing by scale
calL1 = divz(pd_df[L1+'_flux'],pd_df[L1+'_scale'])
calL2 = divz(pd_df[L2+'_flux'],pd_df[L2+'_scale'])
#Find the ratio
rat = divz(calL1,calL2)
#Find the error in the ratio
erat = np.sqrt((divz(1,calL2) * err_df[L1])**2 + (divz(-calL1,(calL2**2)) * err_df[L2])**2)
#Get the integer of the line
if len(L1)==8: iL1=int(L1[0:4])
else: iL1 = int(L1)
if len(L2)==8: iL2=int(L2[0:4])
else: iL2 = int(L2)
#Get the k value for each line
L1k = Calzetti_k(iL1)
L2k = Calzetti_k(iL2)
#Compute the Av
Av = divz(np.log10(rat/bdec),(0.4*((L2k/Rv)-(L1k/Rv))))
#And its error
eAv = divz((1/np.log(10))*divz((erat/bdec),rat),(0.4*((L2k/Rv)-(L1k/Rv))))
return Av,eAv
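#For reference, the Av formula above follows from the Calzetti attenuation curve:
# F_obs(lam) = F_int(lam) * 10**(-0.4 * E(B-V) * k(lam)), so for two lines
# log10(R_obs / R_int) = 0.4 * E(B-V) * (k(lam2) - k(lam1)), and with
# Av = Rv * E(B-V) this gives
# Av = log10(R_obs / R_int) / (0.4 * (k(lam2)/Rv - k(lam1)/Rv))
#where R_int is the intrinsic (unreddened) ratio passed in as bdec.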
d = {'True': True, 'False': False}
lines0 = ['6563_fix','4861']
lines1 = ['4861','4340']
lines2 = ['6563_fix','4340']
c = 0
Av_df = pd.DataFrame()
#Add the fluxfile so that this can later be merged with the main frame
Av_df['fluxfile'] = fluxdata['fluxfile']
Av_df['LMASS'] = fluxdata['LMASS']
for lines in [lines0,lines1,lines2]:
#Filter the data
goodlines = [dataqual[line+'_good'].map(d) for line in lines]
#Needs to be good in all lines to be good
allgood = np.logical_and.reduce(goodlines)
#Needs to be bad in any line to be bad
badlines = [dataqual[line+'_bad'].map(d) for line in lines]
baddata = np.logical_or.reduce(badlines)
lowlines = [dataqual[line+'_low'].map(d) for line in lines]
#Needs to be low in any line to be low, and also not bad in a line
somelow = np.logical_and(np.logical_or.reduce(lowlines),np.logical_not(baddata))
if c==0:
bdec=2.86
elif c==1:
bdec=2.137
else:
bdec=2.86*2.137
Av,eAv = getAv(fluxdata,err_df,lines[0],lines[1],bdec)
if c==0:
Av_df['AvHaHb'] = Av
Av_df['AvHaHberr'] = eAv
elif c==1:
Av_df['AvHbHg'] = Av
Av_df['AvHbHgerr'] = eAv
elif c==2:
Av_df['AvHaHg'] = Av
Av_df['AvHaHgerr'] = eAv
c=c+1
#Get the average between the two good Avs and its error
Av_df['AvHa_avg'] = (Av_df['AvHaHb']+Av_df['AvHaHg'])/2
Av_df['AvHa_avgerr'] = (Av_df['AvHaHberr']+Av_df['AvHaHgerr'])/2
#Find the mass_weighted medians
mr1 = (fluxdata['LMASS']<9.25)
mr2 = np.logical_and(fluxdata['LMASS']>=9.25,fluxdata['LMASS']<9.5)
mr3 = np.logical_and(fluxdata['LMASS']>=9.5,fluxdata['LMASS']<9.75)
mr4 = (fluxdata['LMASS']>=9.75)
med1 = np.median(Av_df[np.logical_and(allgood,mr1)]['AvHaHb'])
med2 = np.median(Av_df[np.logical_and(allgood,mr2)]['AvHaHb'])
med3 = np.median(Av_df[np.logical_and(allgood,mr3)]['AvHaHb'])
med4 = np.median(Av_df[np.logical_and(allgood,mr4)]['AvHaHb'])
'''
#Linear fit for the medians
coeff = np.polyfit(fluxdata[goodidx]['LMASS'],AvHaHg[goodidx],1)
'''
Av_df = Av_df.replace(-np.inf,-99.99999999999)
d = {'True': True, 'False': False}
ulim=4 #Upper Av limit to consider good
#Number of stddevs away from median to be good
sig = 2
for i in range(0,len(fluxdata)):
    '''
    use 0 - 'AvHaHb'
    use 1 - 'AvHaHg'
    use 3 - 'AvMedian'
    use 4 - 'AvHa_avg'
    '''
row = fluxdata.iloc[i]
#Mass-weighted medians
if (row['LMASS'] < 9.25): Av_df.at[i,'AvMedian'] = med1
elif np.logical_and(row['LMASS'] >= 9.25,row['LMASS'] < 9.5,): Av_df.at[i,'AvMedian'] = med2
elif np.logical_and(row['LMASS'] >= 9.5,row['LMASS'] < 9.75): Av_df.at[i,'AvMedian'] = med3
elif (row['LMASS'] >= 9.75): Av_df.at[i,'AvMedian'] = med4
'''
#Linear fit for medians
Av_df.at[i,'AvMedian'] = coeff[0]*row['LMASS']+coeff[1]
'''
Avrow = Av_df.iloc[i]
if np.logical_or((Avrow['AvHaHb'] < 0),((Avrow['AvHaHb'] > ulim))): cHaHb = 10**80
else: cHaHb = Avrow['AvHaHb']
if np.logical_or((Avrow['AvHbHg'] < 0),((Avrow['AvHbHg'] > ulim))): cHbHg = 10**90
else: cHbHg = Avrow['AvHbHg']
if np.logical_or((Avrow['AvHaHg'] < 0),((Avrow['AvHaHg'] > ulim))): cHaHg = 10**100
else: cHaHg = Avrow['AvHaHg']
use = 3
#Find out which lines are good. (Ha,Hb,Hg)
l1g = dataqual['6563_fix_good'].map(d).iloc[i]
l2g = dataqual['4861_good'].map(d).iloc[i]
l3g = dataqual['4340_good'].map(d).iloc[i]
goodlines = (l1g,l2g,l3g)
#If only Ha and Hb are good, check those
if goodlines == (1,1,0):
Av_df.at[i,'AvHaHbok'] = Avrow['AvHaHb']
if (cHaHb < 10): use=0
#If only Ha and Hg are good, check those
if goodlines == (1,0,1):
if (cHaHg < 10): use=1
#If all lines are good,
if goodlines == (1,1,1):
#Compare HaHb and HaHg. if they are within each other's errors, average them
diff = np.abs(cHaHb-cHaHg)
err = Avrow['AvHaHberr']+Avrow['AvHaHgerr']
#If they are within each other's errors, check if the error bars are large on one of the measurements
if (diff < err):
if (divz(Avrow['AvHaHberr'],Avrow['AvHaHgerr']) < 0.5):
if (cHaHb < 10): use=0
elif (divz(Avrow['AvHaHberr'],Avrow['AvHaHgerr']) > 2):
if (cHaHg < 10): use=1
else:
if (Avrow['AvHa_avg'] > 0): use = 4
#If they are not close, use whichever is closest to the median
else:
diffHaHb = np.abs(cHaHb-Avrow['AvMedian'])
diffHaHg = np.abs(cHaHg-Avrow['AvMedian'])
arr = np.array([diffHaHb,diffHaHg])
if (5 > arr[np.argmin(arr)]):
use = np.argmin(arr)
if use == 0: usestr = 'AvHaHb'
elif use == 1: usestr = 'AvHaHg'
elif use == 3: usestr = 'AvMedian'
elif use == 4: usestr = 'AvHa_avg'
Av_df.at[i,'useAv'] = usestr
#Write to csv
Av_df = Av_df.reindex(sorted(Av_df.columns), axis=1)
Av_df.to_csv(dataout,index=False)
| mit |
lin-credible/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
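    # Perplexity of a conditional distribution P_i is 2**H(P_i) with H the
    # Shannon entropy in bits, or equivalently exp(H) with H in nats, which
    # is the form evaluated below.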
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
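    # The objective whose gradient is checked is
    #   C = KL(P || Q) = sum_ij p_ij * log(p_ij / q_ij),
    # with P the input-space joint affinities and Q the heavy-tailed Student-t
    # affinities of the embedding; check_grad compares the analytic gradient
    # against a finite-difference estimate.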
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
marqh/cartopy | lib/cartopy/tests/mpl/test_gridliner.py | 1 | 3051 | # (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from matplotlib.testing.decorators import image_comparison as mpl_image_comparison
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
from matplotlib.path import Path
import shapely.geometry
import cartopy.crs as ccrs
from cartopy.tests.mpl import image_comparison
@image_comparison(baseline_images=['gridliner1'])
def test_gridliner():
desired_gridline_prj = [ccrs.PlateCarree(), ccrs.OSGB()]
projections = [ccrs.PlateCarree(), ccrs.OSGB(), ccrs.RotatedPole(37, 50)]
ny, nx = 2, 4
plt.figure(figsize=(10, 10))
ax = plt.subplot(nx, ny, 1, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
ax.gridlines()
ax = plt.subplot(nx, ny, 2, projection=ccrs.OSGB())
ax.set_global()
ax.coastlines()
ax.gridlines()
ax = plt.subplot(nx, ny, 3, projection=ccrs.OSGB())
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.PlateCarree(), color='blue', linestyle='-')
ax.gridlines(ccrs.OSGB())
ax = plt.subplot(nx, ny, 4, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.NorthPolarStereo(), alpha=0.5, linewidth=1.5, linestyle='-')
ax = plt.subplot(nx, ny, 5, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
osgb = ccrs.OSGB()
ax.set_extent(tuple(osgb.x_limits) + tuple(osgb.y_limits), crs=osgb)
ax.gridlines(osgb)
ax = plt.subplot(nx, ny, 6, projection=ccrs.NorthPolarStereo())
ax.set_global()
ax.coastlines()
ax.gridlines(alpha=0.5, linewidth=1.5, linestyle='-')
ax = plt.subplot(nx, ny, 7, projection=ccrs.NorthPolarStereo())
ax.set_global()
ax.coastlines()
osgb = ccrs.OSGB()
ax.set_extent(tuple(osgb.x_limits) + tuple(osgb.y_limits), crs=osgb)
ax.gridlines(osgb)
ax = plt.subplot(nx, ny, 8, projection=ccrs.Robinson(central_longitude=135))
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.PlateCarree(), alpha=0.5, linewidth=1.5, linestyle='-')
delta = 1.5e-2
plt.subplots_adjust(left=0 + delta, right=1 - delta, top=1 - delta, bottom=0 + delta)
if __name__=='__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False) | gpl-3.0 |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/tests/indexes/timedeltas/test_tools.py | 7 | 7572 | import pytest
from datetime import time, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
from pandas import (Series, Timedelta, to_timedelta, isnull,
TimedeltaIndex)
from pandas._libs.tslib import iNaT
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def test_to_timedelta(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
assert (to_timedelta('1 days 06:05:01.00003', box=False) ==
conv(d1 + np.timedelta64(6 * 3600 + 5 * 60 + 1, 's') +
np.timedelta64(30, 'us')))
assert (to_timedelta('15.5us', box=False) ==
conv(np.timedelta64(15500, 'ns')))
# empty string
result = to_timedelta('', box=False)
assert result.astype('int64') == iNaT
result = to_timedelta(['', ''])
assert isnull(result).all()
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, 's')]))
expected = pd.Index(np.array([np.timedelta64(1, 's')]))
tm.assert_index_equal(result, expected)
# ints
result = np.timedelta64(0, 'ns')
expected = to_timedelta(0, box=False)
assert result == expected
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d', '1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex([np.timedelta64(0, 'ns'), np.timedelta64(
10, 's').astype('m8[ns]')])
expected = to_timedelta([0, 10], unit='s')
tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
# arrays of various dtypes
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='s')
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='m')
expected = TimedeltaIndex([np.timedelta64(1, 'm')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='h')
expected = TimedeltaIndex([np.timedelta64(1, 'h')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[s]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[D]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 'D')] * 5)
tm.assert_index_equal(result, expected)
# Test with lists as input when box=false
expected = np.array(np.arange(3) * 1000000000, dtype='timedelta64[ns]')
result = to_timedelta(range(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta(np.arange(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta([0, 1, 2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
# Tests with fractional seconds as input:
expected = np.array(
[0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]')
result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
def test_to_timedelta_invalid(self):
# bad value for errors parameter
msg = "errors must be one of"
tm.assert_raises_regex(ValueError, msg, to_timedelta,
['foo'], errors='never')
# these will error
pytest.raises(ValueError, lambda: to_timedelta([1, 2], unit='foo'))
pytest.raises(ValueError, lambda: to_timedelta(1, unit='foo'))
# time not supported ATM
pytest.raises(ValueError, lambda: to_timedelta(time(second=1)))
assert to_timedelta(time(second=1), errors='coerce') is pd.NaT
pytest.raises(ValueError, lambda: to_timedelta(['foo', 'bar']))
tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(['foo', 'bar'], errors='coerce'))
tm.assert_index_equal(TimedeltaIndex(['1 day', pd.NaT, '1 min']),
to_timedelta(['1 day', 'bar', '1 min'],
errors='coerce'))
# gh-13613: these should not error because errors='ignore'
invalid_data = 'apple'
assert invalid_data == to_timedelta(invalid_data, errors='ignore')
invalid_data = ['apple', '1 days']
tm.assert_numpy_array_equal(
np.array(invalid_data, dtype=object),
to_timedelta(invalid_data, errors='ignore'))
invalid_data = pd.Index(['apple', '1 days'])
tm.assert_index_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
invalid_data = Series(['apple', '1 days'])
tm.assert_series_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, 's')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'),
timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
assert actual.value == timedelta_NaT.astype('int64')
actual = pd.to_timedelta(pd.NaT)
assert actual.value == timedelta_NaT.astype('int64')
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
expected = Timedelta('100ns')
assert result == expected
result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1,
milliseconds=1, microseconds=1, nanoseconds=1)
expected = Timedelta(694861001001001)
assert result == expected
result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1)
expected = Timedelta('1us1ns')
assert result == expected
result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1)
expected = Timedelta('999ns')
assert result == expected
result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2)
expected = Timedelta('990ns')
assert result == expected
pytest.raises(TypeError, lambda: Timedelta(nanoseconds='abc'))
| agpl-3.0 |
sanketloke/scikit-learn | examples/plot_kernel_ridge_regression.py | 39 | 6259 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
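# A short reminder of the closed form referred to above (standard textbook
# notation, not specific to this example): with kernel matrix K and
# regularization strength alpha, KRR solves
#     dual_coef = (K + alpha * I)^{-1} y
# and predicts f(x) = k(x)^T dual_coef, a single dense linear solve, whereas
# SVR minimizes the epsilon-insensitive loss and typically ends up with a
# sparse set of support vectors.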
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
zorder=2)
plt.scatter(X[:100], y[:100], c='k', label='data', zorder=1)
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
kagayakidan/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open_prices = np.array([q.open for q in quotes]).astype(float)
close_prices = np.array([q.close for q in quotes]).astype(float)
# The daily variations of the quotes are what carry most information
variation = close_prices - open_prices
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
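# Note (added for reference): for a precision matrix P, the partial correlation
# between variables i and j is -P_ij / sqrt(P_ii * P_jj); the rescaling below
# computes P_ij / sqrt(P_ii * P_jj), and the sign is discarded anyway when the
# absolute values are taken for the edge strengths further down.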
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
losonczylab/Zaremba_NatNeurosci_2017 | losonczy_analysis_bundle/lab/plotting/analysis_plotting.py | 1 | 30484 | """Analysis-specific plotting methods"""
import warnings
import numpy as np
import scipy as sp
import itertools as it
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import datetime
import lab
from ..classes.classes import ExperimentGroup
import plotting as plotting
import plotting_helpers as plotting_helpers
from lab.misc import signalsmooth
from ..analysis import behavior_analysis as ba
from ..analysis import place_cell_analysis as place
from ..analysis import imaging_analysis as ia
from ..analysis import calc_activity as calc_activity
from ..classes import exceptions as exc
def activityPlot(
trial, ax, dFOverF='median', demixed=False,
yOffsets=None, linearTransform=None, window_width=100,
dFOverF_percentile=8, timeInterval=None, removeNanBoutons=False,
colorbarAx=None, smoothSize=0, resampling=None, style='color',
colorcode=None, markerDuration=5, colorRange=[-0.2, 1],
label_x_axis=False, channel='Ch2', label=None, roi_filter=None):
"""Plot the activity of all boutons at each time as a heatmap"""
times = trial.parent.imagingTimes()
imData = trial.parent.imagingData(
dFOverF=dFOverF, demixed=demixed,
linearTransform=linearTransform, window_width=window_width,
dFOverF_percentile=dFOverF_percentile,
removeNanBoutons=removeNanBoutons, channel=channel, label=label,
roi_filter=roi_filter)[:, :, trial.trialNum()]
if timeInterval is not None:
imData = imData[:, trial.parent.imagingIndex(
timeInterval[0]):trial.parent.imagingIndex(timeInterval[1])]
times = np.array(times)[trial.parent.imagingIndex(timeInterval[0]):
trial.parent.imagingIndex(timeInterval[1])]
if smoothSize:
for roiIdx in range(imData.shape[0]):
imData[roiIdx] = signalsmooth.smooth(
imData[roiIdx], window_len=smoothSize, window='hanning')
# imData = imData[:,int(smoothSize/2):-int(smoothSize/2)]
# times = times[:-(2*int(smoothSize/2))]
if resampling is not None:
imData = sp.signal.decimate(imData, resampling, axis=1)
times = times[::resampling]
if style == 'color':
roiNums = np.arange(0, imData.shape[0] + 1) + 0.5
TIMES, ROI_NUMS = np.meshgrid(times, roiNums)
im = ax.pcolor(TIMES, ROI_NUMS, imData, vmin=colorRange[0],
vmax=colorRange[1], rasterized=True)
if colorbarAx is not None:
ticks = colorRange
if 0 > ticks[0] and 0 < ticks[1]:
ticks.append(0)
if not colorbarAx == ax:
cbar = colorbarAx.figure.colorbar(
im, ax=colorbarAx, ticks=ticks, fraction=1)
else:
cbar = colorbarAx.figure.colorbar(
im, ax=colorbarAx, ticks=ticks)
cbar.set_label(r'$\Delta$F/F', labelpad=-10)
""" Label the ROIs """
ROIs = [roi.id for roi in trial.rois(channel=channel, label=label)
if roi_filter(roi)]
try:
roiGroups, roiGroupNames = bouton.BoutonSet(ROIs).boutonGroups()
except:
ax.set_yticks(range(len(ROIs)))
ax.set_yticklabels(ROIs)
else:
if colorcode == 'postSynaptic':
for k, group in enumerate(roiGroups):
for roi in group:
# if roiGroupNames[k] != 'other':
ax.add_patch(plt.Rectangle(
(-2, ROIs.index(roi.name) + 0.5), 1, 1,
color=bouton.groupPointStyle(roiGroupNames[k])[0],
lw=0))
""" Plot the behavior data beneath the plot """
framePeriod = trial.parent.frame_period()
for interval in ba.runningIntervals(trial) * framePeriod:
ax.add_patch(plt.Rectangle(
(interval[0], -1), interval[1] - interval[0], 1.3,
color='g', lw=0))
height = -1
for key, color in [('air', 'r'), ('airpuff', 'r'),
('licking', 'b'), ('odorA', 'c'),
('odorB', 'm')]:
try:
intervals = trial.behaviorData()[key]
except KeyError:
pass
else:
height -= 1
for interval in intervals:
ax.add_patch(Rectangle(
(interval[0], height), interval[1] - interval[0],
1, facecolor=color, lw=0))
ax.set_xlim([-2, times[-1]])
ax.spines['left'].set_bounds(1, len(roiNums) - 1)
ax.spines['left'].set_position(('outward', 2))
for side in ['right', 'top', 'bottom']:
ax.spines[side].set_color('none')
ax.set_yticks([1, len(roiNums) - 1])
if label_x_axis:
ax.set_xlabel('time (s)')
else:
ax.set_xticks([])
ax.set_ylabel('ROI #', labelpad=-9)
ax.yaxis.set_ticks_position('left')
ax.tick_params(axis='y', direction='out')
ax.set_ylim([height, len(roiNums) - 0.5])
elif style == 'traces':
data = [imData.reshape([imData.shape[0], imData.shape[1], 1])]
plotting.tracePlot(
ax, data, times, ROIs, stimulusDurations=None, shading=None,
yOffsets=yOffsets, markerDuration=markerDuration)
framePeriod = trial.parent.frame_period()
yMin = ax.get_ylim()[0]
for interval in ba.runningIntervals(trial) * framePeriod:
ax.add_patch(plt.Rectangle(
(interval[0], yMin - 1), interval[1] - interval[0], 1,
color='g', lw=0))
# ax.set_xlim([-2, times[-1]])
ax.set_ylim(bottom=yMin - 1)
# ADDED SUPPORT FOR LASER PLOT
def behaviorPlot(
trial, ax, keys=['velocity', 'running', 'position', 'licking', 'tone', 'light',
'water', 'reward', 'airpuff', 'motion', 'laser'],
colors=None, include_empty=False, y_start=-1):
"""Plot behavior data over time
Keyword arguments:
ax -- axis to plot on
keys -- behavior data to plot, id data is missing it is skipped
colors -- colors list to use, will be iterated over
include_empty -- if True, plot data that has no intervals, if false,
exclude those rows
y_start -- bottom of first plot, decreases by one for each successive plot
"""
try:
bd = trial.behaviorData()
except exc.MissingBehaviorData:
return
if colors is None:
colors = lab.plotting.color_cycle()
else:
colors = iter(colors)
next_y = y_start
labels = []
label_colors = []
for key in keys:
if key == 'velocity':
try:
velocity = ba.velocity(
trial, imageSync=False, sampling_interval='actual',
smoothing='hanning', window_length=71)
                bd['recordingDuration']
            except (exc.MissingBehaviorData, KeyError):
                pass
else:
labels.append('velocity')
next_color = colors.next()
label_colors.append(next_color)
velocity -= np.amin(velocity)
velocity /= np.amax(velocity) / 0.9
velocity += next_y + 0.05
ax.plot(np.linspace(0, bd['recordingDuration'],
len(velocity)), velocity,
color=next_color)
next_y -= 1
elif key == 'position':
try:
position = bd['treadmillPosition']
bd['recordingDuration']
bd['samplingInterval']
except KeyError:
pass
else:
labels.append('position')
next_color = colors.next()
label_colors.append(next_color)
full_position = np.empty(int(np.ceil(
bd['recordingDuration'] / bd['samplingInterval'])))
for t, pos in position:
full_position[int(t / bd['samplingInterval']):] = pos
full_position *= 0.9
full_position += next_y + 0.05
ax.plot(np.linspace(0, bd['recordingDuration'],
len(full_position)), full_position,
color=next_color)
next_y -= 1
else:
try:
if key == 'running':
data = ba.runningIntervals(trial, imageSync=False) *\
bd['samplingInterval']
else:
data = bd[key]
except KeyError:
pass
else:
if include_empty or len(data) > 0:
labels.append(key)
next_color = colors.next()
label_colors.append(next_color)
for interval in data:
ax.add_patch(Rectangle(
(interval[0], next_y),
interval[1] - interval[0], 1,
facecolor=next_color, lw=0))
next_y -= 1
if next_y == y_start:
return
ax.set_yticks(np.arange(-0.5, next_y + 0.5, -1))
ax.set_yticklabels(labels)
for tick, c in zip(ax.get_yticklabels(), label_colors):
tick.set_color(c)
try:
ax.set_xlim([0, int(bd['recordingDuration'])])
except KeyError:
pass
ax.set_ylim([next_y + 1, 0])
ax.set_xlabel('Time (s)')
ax.set_title('{0}:{1}'.format(trial.parent.parent.get('mouseID'),
trial.get('time')))
def plot_imaging_and_behavior(
trial, ax, start_time=0, stop_time=None, channel='Ch2', label=None,
roi_filter=None, label_rois=False,
keys=['running', 'licking', 'water', 'airpuff', 'tone', 'light'],
colors=None, include_empty=False, dFOverF='from_file'):
"""Plot imaging data for all ROIs with behavior data underneath"""
imaging_data = trial.parent.imagingData(
channel=channel, label=label, roi_filter=roi_filter,
dFOverF=dFOverF)[..., trial.parent.findall('trial').index(trial)]
if not imaging_data.shape[0]:
return
frame_period = trial.parent.frame_period()
start_frame = int(start_time / frame_period)
if stop_time is None:
stop_frame = imaging_data.shape[1]
else:
stop_frame = int(stop_time / frame_period)
if stop_time is None:
stop_time = trial.parent.imagingTimes(channel=channel)[-1]
imaging_data = imaging_data[:, start_frame:stop_frame]
t_range = np.linspace(start_time, stop_time, imaging_data.shape[1])
max_F = np.nanmax(imaging_data)
# Normalize and re-scale so they can all be plotted on top of each other
imaging_data /= max_F
imaging_data += np.arange(imaging_data.shape[0]).reshape((-1, 1)) + 0.5
ax.plot(t_range, imaging_data.T)
behaviorPlot(
trial, ax, keys=keys, colors=colors, include_empty=include_empty)
if label_rois:
roi_ids = trial.parent.roi_ids(
channel=channel, label=label, roi_filter=roi_filter)
x_range = ax.get_xlim()[1]
for idx, roi_id in enumerate(roi_ids):
ax.text(x_range * -0.01, idx + 0.5, roi_id, ha='right')
ax.set_ylim(top=imaging_data.shape[0] + 0.5)
plotting_helpers.add_scalebar(
ax, matchx=False, matchy=False, hidex=False, hidey=False,
sizey=0.5 / max_F, labely='0.5', bar_thickness=0, loc=1,
borderpad=0.5)
def responsePairPlot(exptGrp, ax, stim1, stim2, stimuliLabels=None,
excludeRunning=True, boutonGroupLabeling=False,
linearTransform=None, axesCenter=True, channel='Ch2',
label=None, roi_filter=None):
if not isinstance(stim1, list):
stim1 = [stim1]
if not isinstance(stim2, list):
stim2 = [stim2]
if stimuliLabels is None:
stimuliLabels = [stim1[0], stim2[0]]
ROIs = exptGrp.sharedROIs(
roiType='GABAergic', channel=channel, label=label,
roi_filter=roi_filter)
shared_filter = lambda x: x.id in ROIs
rIntegrals = []
for stim in [stim1, stim2]:
if stim == ['running']:
rIntegrals.append(ia.runningModulation(
exptGrp, linearTransform=linearTransform, channel=channel,
label=label, roi_filter=shared_filter).reshape([-1, 1]))
elif stim == ['licking']:
rIntegrals.append(ia.lickingModulation(
exptGrp, linearTransform=linearTransform, channel=channel,
label=label, roi_filter=shared_filter).reshape([-1, 1]))
else:
rIntegrals.append(ia.responseIntegrals(
exptGrp, stim, excludeRunning=excludeRunning,
sharedBaseline=True, linearTransform=linearTransform,
dFOverF='mean', channel=channel, label=label,
roi_filter=shared_filter))
if not boutonGroupLabeling:
ROIs = None
plotting.ellipsePlot(
ax, rIntegrals[0].mean(axis=1), rIntegrals[1].mean(axis=1),
2 * np.sqrt(rIntegrals[0].var(axis=1) / rIntegrals[0].shape[1]),
2 * np.sqrt(rIntegrals[1].var(axis=1) / rIntegrals[1].shape[1]),
boutonGroupLabeling=ROIs, color='k', axesCenter=axesCenter)
ax.set_xlabel(stimuliLabels[0], labelpad=1)
ax.set_ylabel(stimuliLabels[1], labelpad=1)
# NOTE: if an axis is passed in, the figure is taken from it via get_figure()
def plotLickRate(exptGrp, ax=None, minTrialDuration=0):
"""Generate a figure showing the lick rate for each trial in this
ExperimentGroup.
Keyword arguments:
ax -- axis to plot on, created if 'None'
minTrialDuration -- minimum duration of trial (in seconds) to be included in
analysis
"""
lickrates = []
dates = []
for experiment in exptGrp:
for trial in experiment.findall('trial'):
try:
bd = trial.behaviorData()
if 'licking' in bd.keys() and \
'recordingDuration' in bd.keys() and \
bd['recordingDuration'] >= minTrialDuration:
lickrates.append(bd['licking'].shape[0] /
bd['recordingDuration'])
dates.append(trial.attrib['time'])
except exc.MissingBehaviorData:
pass
if len(lickrates) > 0:
if ax is None:
fig = plt.figure(figsize=(11, 8))
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
ax.bar(np.arange(len(lickrates)), lickrates, 0.5)
ax.set_ylabel('Lick rate (Hz)')
ax.set_title('lick rate per trial')
ax.set_xticks(np.arange(len(lickrates)) + 0.25)
ax.set_xticklabels(
dates, ha='right', rotation_mode='anchor', rotation=30)
return fig
# As above, the figure is taken from a passed-in axis via get_figure()
def plotLapRate(exptGrp, ax=None, minTrialDuration=0):
"""Generates a figure showing the number of laps completed per minute.
Keyword arguments:
ax -- axis to plot on, created if 'None'
minTrialDuration -- minimum duration of trial (in seconds) to be included
in analysis
"""
laprates = []
dates = []
for experiment in exptGrp:
for trial in experiment.findall('trial'):
try:
bd = trial.behaviorData()
if 'lapCounter' in bd.keys() and \
len(bd['lapCounter']) > 0 and \
'recordingDuration' in bd.keys() and \
bd['recordingDuration'] >= minTrialDuration:
laprates.append(sum(bd['lapCounter'][:, 1] == 1) /
bd['recordingDuration'] * 60.0)
dates.append(trial.attrib['time'])
except exc.MissingBehaviorData:
pass
if len(laprates) > 0:
if ax is None:
fig = plt.figure(figsize=(11, 8))
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
ax.bar(np.arange(len(laprates)), laprates, 0.5)
ax.set_ylabel('Lap rate (laps/minute)')
ax.set_title('lap rate per trial')
ax.set_xticks(np.arange(len(laprates)) + 0.25)
ax.set_xticklabels(
dates, ha='right', rotation_mode='anchor', rotation=15)
return fig
def plotLapRateByDays(exptGrp, ax=None, color=None):
"""Plots lap rate by days of training"""
if ax is None:
ax = plt.axes()
if color is None:
color = lab.plotting.color_cycle().next()
training_days = exptGrp.priorDaysOfExposure(ignoreBelt=True)
lap_rates = {}
for expt in exptGrp:
for trial in expt.findall('trial'):
try:
bd = trial.behaviorData()
except exc.MissingBehaviorData:
continue
else:
if len(bd.get('lapCounter', [])) > 0 \
and 'recordingDuration' in bd:
if training_days[expt] not in lap_rates:
lap_rates[training_days[expt]] = []
lap_rates[training_days[expt]].append(
np.sum(bd['lapCounter'][:, 1] == 1) /
bd['recordingDuration'] * 60.0)
if len(lap_rates) > 0:
days = lap_rates.keys()
days.sort()
day_means = []
for day in days:
# Jitter x position
x = (np.random.rand(len(lap_rates[day])) * 0.2) - 0.1 + day
ax.plot(x, lap_rates[day], '.', color=color, markersize=7)
day_means.append(np.mean(lap_rates[day]))
ax.plot(days, day_means, '-', label=exptGrp.label(), color=color)
ax.set_ylabel('Lap rate (laps/minute)')
ax.set_xlabel('Days of training')
ax.set_title('Average running by days of belt exposure')
def activityComparisonPlot(exptGrp, method, ax=None, mask1=None, mask2=None,
label1=None, label2=None, roiNamesToLabel=None,
normalize=False, rasterized=False,
dF='from_file', channel='Ch2', label=None,
roi_filter=None, demixed=False):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
if len(exptGrp) != 2:
warnings.warn(
'activityComparisonPlot requires an experimentGroup of 2 ' +
'experiments. Using the first two elements of {}'.format(exptGrp))
grp = exptGrp[:2]
else:
grp = exptGrp
ROIs = grp.sharedROIs(channel=channel, label=label,
roi_filter=roi_filter)
shared_filter = lambda x: x.id in ROIs
exp1ROIs = grp[0].roi_ids(channel=channel, label=label,
roi_filter=shared_filter)
exp2ROIs = grp[1].roi_ids(channel=channel, label=label,
roi_filter=shared_filter)
order1 = np.array([exp1ROIs.index(x) for x in ROIs])
order2 = np.array([exp2ROIs.index(x) for x in ROIs])
# inds of the roiNamesToLabel (in terms of exp1 indices)
if roiNamesToLabel:
order3 = np.array(
[exp1ROIs.index(x) for x in roiNamesToLabel if x in ROIs])
activity1 = calc_activity(
grp[0], method=method, interval=mask1, dF=dF, channel=channel,
label=label, roi_filter=roi_filter, demixed=demixed)
activity2 = calc_activity(
grp[1], method=method, interval=mask2, dF=dF, channel=channel,
label=label, roi_filter=roi_filter, demixed=demixed)
# ordering corresponds to sharedROIs() ordering
activity1 = np.array([activity1[x] for x in order1]).flatten()
activity2 = np.array([activity2[x] for x in order2]).flatten()
if normalize:
activity1 = activity1 / float(np.amax(activity1))
activity2 = activity2 / float(np.amax(activity2))
# -1 flips sort so it's actually high to low and also puts NaNs at the end
order = np.argsort(-1 * activity2)
bar_lefts = np.arange(len(ROIs))
width = 1
if not label1:
label1 = grp[0].get('startTime')
if not label2:
label2 = grp[1].get('startTime')
ax.bar(np.array(bar_lefts), activity1[order], width, color='b',
alpha=0.5, label=label1, rasterized=rasterized)
ax.bar(np.array(bar_lefts), np.negative(activity2)[order], width,
color='r', alpha=0.5, label=label2, rasterized=rasterized)
max_y = np.amax(np.abs(ax.get_ylim()))
ax.set_ylim(-max_y, max_y)
ax.set_xlim(right=len(ROIs))
# roiIndsToIndicate = [np.argwhere(order1[order]==roi)[0][0] for roi in exp1RoisToIndicate if roi in order1[order]]
if roiNamesToLabel:
# ylim = ax.get_ylim()
roiIndsToIndicate = [
np.argwhere(order1[order] == x)[0][0] for x in order3]
for idx in roiIndsToIndicate:
ax.axvline(
idx + 0.5, linestyle='dashed', color='k',
rasterized=rasterized)
# ax.vlines(np.array(roiIndsToIndicate)+0.5, ylim[0], ylim[1], linestyles='dashed', color='k')
# ax.set_ylim(ylim)
# make all y-axis labels positive
ax.set_yticklabels(np.abs(ax.get_yticks()))
ax.set_xlabel('ROI index')
ax.set_ylabel('Activity = {}'.format(method))
ax.legend()
return fig
def activityByExposure(exptGrp, ax=None, stat='mean',
combineTimes=datetime.timedelta(hours=12),
ignoreContext=False, **kwargs):
"""Plots cdf of activity of ROIs by days of context exposure
Keyword arguments:
stat -- statistic to plot, see calc_activity.py for details
combineTimes -- experiments within this timedelta of each other are
considered the same day for determining exposure
ignoreContext -- if True, ignores context for determining exposure
**kwargs -- any additional arguments will be passed in to
place.calc_activity_statistic
"""
if ax is None:
_, ax = plt.subplots()
exptsByExposure = ExperimentGroup.dictByExposure(
exptGrp, combineTimes=combineTimes, ignoreBelt=ignoreContext,
ignoreContext=ignoreContext)
colors = lab.plotting.color_cycle()
for exposure in sorted(exptsByExposure):
exgrp = ExperimentGroup(
exptsByExposure[exposure],
label='1 day' if exposure == 0 else str(exposure + 1) +
' days')
place.calc_activity_statistic(
exgrp, ax=ax, stat=stat, plot_method='cdf',
label=exgrp.label(), c=colors.next(), **kwargs)
ax.legend(loc='lower right')
ax.set_title('{} by exposure - {}'.format(
stat,
'ignoring context' if ignoreContext else 'including context'))
def compare_bouton_responses(
exptGrp, ax, stimuli, comp_method='angle', plot_method='cdf',
channel='Ch2', label=None, roi_filter=None, **response_kwargs):
"""Compare various pairs of boutons, based on several conventions:
'bouton' in label of bouton ROIs
boutons targeting a cell soma are tagged with the cell number they are
targeting, i.e. 'cell1', 'cell2', etc.
boutons on an axon are tagged with the fiber number they are on,
i.e. 'fiber1', 'fiber2', etc.
boutons with no tags have no information about their axon or target
"""
response_matrix, rois = ia.response_matrix(
exptGrp, stimuli, channel=channel, label=label, roi_filter=roi_filter,
return_full=True, **response_kwargs)
data = {}
data['mouse'] = [roi[0] for roi in rois]
data['loc'] = [roi[1] for roi in rois]
data['label'] = [roi[2] for roi in rois]
tags = []
for mouse, loc, name in it.izip(
data['mouse'], data['loc'], data['label']):
roi_tags = set()
for expt in exptGrp:
if expt.parent == mouse \
and expt.get('uniqueLocationKey') == loc:
for roi in expt.rois(
channel=channel, label=label,
roi_filter=roi_filter):
if roi.label == name:
# NOTE: Taking the union of all tags,
# so mis-matched tags will just be combined
roi_tags = roi_tags.union(roi.tags)
tags.append(roi_tags)
data['tags'] = tags
data['responses'] = [response for response in response_matrix]
df = pd.DataFrame(data)
if comp_method == 'angle':
ax.set_xlabel('Response similarity (angle)')
def compare(roi1, roi2):
return np.dot(roi1, roi2) / np.linalg.norm(roi1) \
/ np.linalg.norm(roi2)
elif comp_method == 'abs angle':
ax.set_xlabel('Response similarity (abs angle)')
def compare(roi1, roi2):
return np.abs(np.dot(roi1, roi2) / np.linalg.norm(roi1)
/ np.linalg.norm(roi2))
elif comp_method == 'corr':
ax.set_xlabel('Response similarity (corr)')
def compare(roi1, roi2):
return np.corrcoef(roi1, roi2)[0, 1]
elif comp_method == 'abs corr':
ax.set_xlabel('Response similarity (abs corr)')
def compare(roi1, roi2):
return np.abs(np.corrcoef(roi1, roi2)[0, 1])
elif comp_method == 'mean diff':
ax.set_xlabel('Response similarity (mean diff)')
def compare(roi1, roi2):
return np.abs(roi1 - roi2).mean()
else:
raise ValueError('Unrecognized compare method argument')
same_fiber = []
fiber_with_not = []
same_soma = []
soma_with_not = []
bouton_with_fiber = []
diff_all = []
for name, group in df.groupby(['mouse', 'loc']):
for roi1, roi2 in it.combinations(group.iterrows(), 2):
r1_responses = roi1[1]['responses']
r2_responses = roi2[1]['responses']
non_nan = np.isfinite(r1_responses) & np.isfinite(r2_responses)
comp = compare(r1_responses[non_nan], r2_responses[non_nan])
if np.isnan(comp):
continue
fiber1 = set(
[tag for tag in roi1[1]['tags'] if 'fiber' in tag])
fiber2 = set(
[tag for tag in roi2[1]['tags'] if 'fiber' in tag])
cell1 = set([tag for tag in roi1[1]['tags'] if 'cell' in tag])
cell2 = set([tag for tag in roi2[1]['tags'] if 'cell' in tag])
if len(fiber1.intersection(fiber2)):
same_fiber.append(comp)
elif len(fiber1) or len(fiber2):
fiber_with_not.append(comp)
if len(cell1.intersection(cell2)):
same_soma.append(comp)
elif len(cell1) or len(cell2):
soma_with_not.append(comp)
if len(fiber1) and roi2[1]['label'] in fiber1 \
or len(fiber2) and roi1[1]['label'] in fiber2:
bouton_with_fiber.append(comp)
elif not len(fiber1.intersection(fiber2)) \
and not len(cell1.intersection(cell2)):
diff_all.append(comp)
if plot_method == 'cdf':
plotting.cdf(
ax, same_fiber, bins='exact', label='same fiber')
plotting.cdf(
ax, same_soma, bins='exact', label='same soma')
plotting.cdf(
ax, bouton_with_fiber, bins='exact', label='bouton with fiber')
plotting.cdf(
ax, fiber_with_not, bins='exact', label='fiber with not')
plotting.cdf(
ax, soma_with_not, bins='exact', label='soma with not')
plotting.cdf(
ax, diff_all, bins='exact', label='diff all')
elif plot_method == 'hist':
colors = lab.plotting.color_cycle()
plotting.histogram(
ax, same_fiber, bins=50, color=colors.next(), normed=True,
label='same fiber')
plotting.histogram(
ax, same_soma, bins=50, color=colors.next(), normed=True,
label='same soma')
plotting.histogram(
ax, bouton_with_fiber, bins=50, color=colors.next(),
normed=True, label='bouton with fiber')
plotting.histogram(
ax, fiber_with_not, bins=50, color=colors.next(), normed=True,
label='fiber with not')
plotting.histogram(
ax, soma_with_not, bins=50, color=colors.next(), normed=True,
label='soma with not')
plotting.histogram(
ax, diff_all, bins=50, color=colors.next(), normed=True,
label='diff all')
# ax.legend()
return {'same fiber': same_fiber, 'same soma': same_soma,
'bouton_with_fiber': bouton_with_fiber,
'fiber_with_not': fiber_with_not,
'soma_with_not': soma_with_not, 'diff all': diff_all}
def stim_response_heatmap(
exptGrp, ax, stims, sort_by=None, method='responsiveness',
z_score=True, aspect_ratio=0.25, **response_kwargs):
"""Plot a heatmap of stim responses per ROI."""
data = ia.response_matrix(
exptGrp, stims, method=method, z_score=z_score, **response_kwargs)
if sort_by is not None:
if isinstance(sort_by, list):
# If we get a list of stims, sort by the mean of them
indices = [stims.index(stim) for stim in sort_by]
to_sort = data[:, indices].mean(1)
# Remove rows that have a nan in any of the sort by cols
non_nan_rows = np.isfinite(to_sort)
data = data[non_nan_rows]
order = to_sort[non_nan_rows].argsort()[::-1]
data = data[order]
else:
# If we get a single stim, sort by the response to that stim
sort_column = stims.index(sort_by)
# Remove rows that have NaN's in the sort_by column
non_nan_rows = np.isfinite(data[:, sort_column])
data = data[non_nan_rows, :]
order = data[:, sort_column].argsort()[::-1]
data = data[order]
ax.imshow(data, interpolation='none', aspect=aspect_ratio)
ax.xaxis.tick_top()
ax.set_xticks(np.arange(len(stims)))
ax.set_xticklabels(stims)
ax.tick_params(labelbottom=False, bottom=False, left=False, top=False,
right=False)
| mit |
TheKingInYellow/PySeidon | pyseidon/tidegaugeClass/plotsTidegauge.py | 2 | 4305 | #!/usr/bin/python2.7
# encoding: utf-8
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as Tri
import matplotlib.ticker as ticker
import matplotlib.patches as mpatches
import seaborn
import pandas as pd
class PlotsTidegauge:
"""
**'Plots' subset of Tidegauge class gathers plotting functions**
"""
def __init__(self, variable, debug=False):
self._debug = debug
setattr(self, '_var', variable)
def _def_fig(self):
"""Defines figure window"""
self._fig = plt.figure(figsize=(18,10))
plt.rc('font',size='22')
def plot_xy(self, x, y, xerror=[], yerror=[],
title=' ', xLabel=' ', yLabel=' ', dump=False, **kwargs):
"""
Simple X vs Y plot
Inputs:
- x = 1D array
- y = 1D array
Options:
- xerror = error on 'x', 1D array
- yerror = error on 'y', 1D array
- title = plot title, string
- xLabel = title of the x-axis, string
- yLabel = title of the y-axis, string
- dump = boolean, dump profile data in csv file
- kwargs = keyword options associated with pandas.DataFrame.to_csv, such as:
sep, header, na_rep, index, etc.
Check the documentation of "to_csv" for the complete list of options
"""
#fig = plt.figure(figsize=(18,10))
#plt.rc('font',size='22')
self._def_fig()
self._ax = self._fig.add_subplot(111)
self._ax.plot(x, y, label=title)
scale = 1
self._ax.set_ylabel(yLabel)
self._ax.set_xlabel(xLabel)
self._ax.get_xaxis().set_minor_locator(ticker.AutoMinorLocator())
self._ax.get_yaxis().set_minor_locator(ticker.AutoMinorLocator())
self._ax.grid(b=True, which='major', color='w', linewidth=1.5)
self._ax.grid(b=True, which='minor', color='w', linewidth=0.5)
if not yerror==[]:
self._ax.fill_between(x, y-yerror, y+yerror,
alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)
if not xerror==[]:
self._ax.fill_betweenx(y, x-xerror, x+xerror,
alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)
if (not xerror==[]) or (not yerror==[]):
blue_patch = mpatches.Patch(color='#089FFF',
label='Standard deviation',alpha=0.2)
plt.legend(handles=[blue_patch],loc=1, fontsize=12)
#plt.legend([blue_patch],loc=1, fontsize=12)
self._fig.show()
if dump: self._dump_profile_data_as_csv(x, y, xerror=xerror, yerror=yerror,
title=title, xLabel=xLabel,
yLabel=yLabel, **kwargs)
def _dump_profile_data_as_csv(self, x, y, xerror=[], yerror=[],
title=' ', xLabel=' ', yLabel=' ', **kwargs):
"""
Dumps profile data in csv file
Inputs:
- x = 1D array
- y = 1D array
Options:
- xerror = error on 'x', 1D array
- yerror = error on 'y', 1D array
- title = file name, string
- xLabel = name of the x-data, string
- yLabel = name of the y-data, string
- kwargs = keyword options associated with pandas.DataFrame.to_csv, such as:
sep, header, na_rep, index, etc.
Check the documentation of "to_csv" for the complete list of options
"""
if title == ' ': title = 'dump_profile_data'
filename=title + '.csv'
if xLabel == ' ': xLabel = 'X'
if yLabel == ' ': yLabel = 'Y'
if not xerror == []:
df = pd.DataFrame({xLabel:x[:], yLabel:y[:], 'error': xerror[:]})
elif not yerror == []:
df = pd.DataFrame({xLabel:x[:], yLabel:y[:], 'error': yerror[:]})
else:
df = pd.DataFrame({xLabel:x[:], yLabel:y[:]})
df.to_csv(filename, encoding='utf-8', **kwargs)
#TR_comments: templates
# def whatever(self, debug=False):
# if debug or self._debug:
# print 'Start whatever...'
#
# if debug or self._debug:
# print '...Passed'
| agpl-3.0 |
reimandlab/Visualistion-Framework-for-Genome-Mutations | website/stats/plots/active_driver.py | 1 | 8658 | from collections import Counter
from operator import attrgetter
from pathlib import Path
from types import FunctionType
from typing import Mapping
from pandas import Series, DataFrame
from sqlalchemy import func
from analyses import per_cancer_analysis, pan_cancer_analysis, clinvar_analysis
from analyses.active_driver import mutations_from_significant_genes
from analyses.enrichment import active_driver_genes_enrichment
from database import db
from helpers.plots import bar_plot, stacked_bar_plot
from models import Mutation, Protein, Gene, Site, MC3Mutation, Cancer, InheritedMutation, MutationSource
from stats.analyses.ontology import Ontology, draw_ontology_graph
from ..store import cases
from .common import site_types_with_any, site_types
from .ptm_mutations import gather_ptm_muts_impacts
def subset_analysis_as_function_of_site(analyses_collection, key, name):
def analysis(site_type):
return analyses_collection(site_type)[key]
analysis.__name__ = name
return analysis
Analysis = FunctionType
whole_dataset_analyses: Mapping[Analysis, MutationSource] = {
pan_cancer_analysis: MC3Mutation,
clinvar_analysis: InheritedMutation
}
subset_analyses = {
subset_analysis_as_function_of_site(
per_cancer_analysis,
cancer.code,
f'per_cancer_analysis_{cancer.code}'
): MC3Mutation
for cancer in Cancer.query
}
all_analyses = {
**whole_dataset_analyses,
**subset_analyses
}
def count_mutations_by_gene(source, genes, site_type, filters=None):
counts = []
for gene in genes:
query = (
db.session.query(func.sum(source.count))
.select_from(source)
.join(Mutation).join(Protein)
.filter(Mutation.affected_sites.any(
Site.types.contains(site_type)
))
.join(Gene, Gene.preferred_isoform_id == Protein.id)
.filter(Gene.name == gene)
)
if filters is not None:
query = query.filter(filters)
counts.append(query.scalar())
return counts
def by_muts_count(result, source: MutationSource, site_type, filters=None):
top_fdr = result['top_fdr']
mutation_counts = count_mutations_by_gene(source, top_fdr.gene, site_type, filters)
return (
top_fdr.gene,
mutation_counts,
[f'Mutations: {count}<br>FDR: {fdr}' for count, fdr in zip(mutation_counts, top_fdr.fdr)]
)
def by_muts_stacked(result, source, site_type):
top_fdr = result['top_fdr']
muts = result['all_active_mutations']
genes = top_fdr.sort_values(by='fdr').gene
grouped = gather_ptm_muts_impacts(source, site_type, limit_to_genes=genes, limit_to_muts=muts)
return {
impact: (genes, [muts_by_gene[gene_name] for gene_name in genes], [])
for impact, muts_by_gene in grouped.items()
}
active_driver_cases = cases(
analysis=all_analyses,
site_type=site_types_with_any,
).set_mode('product')
@active_driver_cases
@stacked_bar_plot
def muts_by_impact(analysis, site_type):
source = all_analyses[analysis]
try:
result = analysis(site_type.name)
except KeyError:
print(f'No results for {analysis}')
return {}
return by_muts_stacked(result, source, site_type)
@active_driver_cases
@bar_plot
def muts(analysis, site_type):
source = all_analyses[analysis]
result = analysis(site_type.name)
return by_muts_count(result, source, site_type)
@cases(cancer_code={cancer.code for cancer in Cancer.query})
@bar_plot
def per_cancer_glycosylation(cancer_code, site_type='glycosylation'):
results = per_cancer_analysis(site_type)
try:
result = results[cancer_code]
except KeyError:
print(f'No results for {cancer_code}')
return [], []
return by_muts_count(result, MC3Mutation, site_type, MC3Mutation.cancer_code == cancer_code)
def counts_by(column_name, df: DataFrame, by_unique=False) -> dict:
if by_unique:
column = getattr(df, column_name)
return column.value_counts()
counts = Counter()
get_by = attrgetter(column_name)
for row in df.itertuples():
counts[get_by(row)] += int(row.count)
return counts
@cases(site_type=site_types)
@bar_plot
def cancers(site_type):
result = pan_cancer_analysis(site_type)
counts = counts_by('cancer_type', result['all_active_mutations'])
cancer_by_code = {cancer.code: cancer.name for cancer in Cancer.query}
return (
counts.keys(),
counts.values(),
[
f'{cancer_by_code[cancer_type]}: {count} mutations'
for cancer_type, count in counts.items()
]
)
def ontology_plots(
terms, analysis_name, vector=True, thresholds=(70, 75, 80, 85, 90, 95), allow_misses=True,
limit_to=None, unflatten=900
):
predefined_ontologies = {
'phenotypes': 'data/hp.obo',
'diseases': 'data/HumanDO.obo',
'mondo': 'data/mondo.obo'
}
ontology_subsets = {
'mondo': [None, 'disease', 'disease characteristic']
}
ontologies = {
name: Ontology(path)
for name, path in predefined_ontologies.items()
if not limit_to or name in limit_to
}
path = Path('analyses_output') / 'active_driver_plots' / analysis_name
path.mkdir(parents=True, exist_ok=True)
plots = {}
for name, ontology in ontologies.items():
subsets = ontology_subsets.get(name, [None])
for subset in subsets:
for above_percentile in thresholds:
graph = ontology.process_graph(terms, above_percentile, root_name=subset, allow_misses=allow_misses, show_progress=True)
plot = draw_ontology_graph(
graph,
path / f'{name}_{above_percentile}{"_" + subset if subset else ""}.{"svg" if vector else "png"}',
unflatten
)
plots[name, subset, above_percentile] = plot
return plots
def cancer_mutations(result, significant=True):
mutations = (
mutations_from_significant_genes(result, mutation_model=MC3Mutation)
if significant else
result['all_active_mutations']
)
cancer_by_code = {cancer.code: cancer.name for cancer in Cancer.query}
mutations = mutations.assign(cancer_name=Series(
cancer_by_code[mutation.cancer_type]
for mutation in mutations.itertuples(index=False)
).values)
return mutations
def merged_cancer_mutations(site_type):
all_cancer_mutations = [cancer_mutations(pan_cancer_analysis(site_type))]
ad_per_cancer = per_cancer_analysis(site_type)
for cancer_type, result in ad_per_cancer.items():
mutations = cancer_mutations(result)
all_cancer_mutations.append(mutations)
return all_cancer_mutations
@cases(site_type=site_types)
def cancers_ontology(site_type, significant=True, vector=False):
result = pan_cancer_analysis(site_type)
mutations = cancer_mutations(result, significant=significant)
terms = counts_by('cancer_name', mutations)
return ontology_plots(
terms, 'cancers', vector,
[0, 70, 75, 80, 85, 90, 95],
allow_misses=False, limit_to=['diseases', 'mondo']
)
def disease_mutations(result, significant=True):
mutations = (
mutations_from_significant_genes(result, mutation_model=InheritedMutation)
if significant else
result['all_active_mutations']
)
return mutations
@cases(site_type=site_types)
def diseases_wordcloud(site_type, significant=True):
result = clinvar_analysis(site_type.name)
mutations = disease_mutations(result, significant=significant)
print(
'Copy-paste following text into a wordcloud generation program, '
'e.g. https://www.jasondavies.com/wordcloud/'
)
print(' '.join(mutations.disease))
@cases(site_type=site_types)
def diseases_ontology(site_type, significant=True, vector=False):
result = clinvar_analysis(site_type.name)
mutations = disease_mutations(result, significant=significant)
terms = counts_by('disease', mutations)
return ontology_plots(terms, 'diseases', vector)
@cases(site_type=site_types)
@bar_plot
def cancer_census_enrichment(site_type):
analyses = {
'TCGA': pan_cancer_analysis,
'ClinVar': clinvar_analysis
}
results = {}
p_values = {}
for name, analysis in analyses.items():
result = analysis(site_type)
observed_count, expected_count, contingency_table, oddsratio, pvalue = active_driver_genes_enrichment(result)
results[name] = oddsratio
p_values[name] = pvalue
return [results.keys(), results.values(), p_values.values()]
| lgpl-2.1 |