repo_name (string, length 6–112) | path (string, length 4–204) | copies (string, length 1–3) | size (string, length 4–6) | content (string, length 714–810k) | license (string, 15 classes)
---|---|---|---|---|---|
miladh/lgn-simulator | tools/analysis/plotting_tools.py | 1 | 18502 | import matplotlib.pyplot as plt
import colormaps as cmaps
import numpy as np
import pretty_plotting as pp
def raster(spike_times,
ax = None,
figsize = (12,8),
ylabel = "Cell Id",
title = None,
color="k"):
"""
Raster plot
"""
if not isinstance(spike_times, list):
spike_times = [spike_times]
num_cells = len(spike_times)
if ax is None:
f = plt.figure(figsize=figsize)
ax = f.add_subplot(111)
#pretty plot functions
pp.spines_edge_color(ax)
pp.remove_ticks(ax)
pp.set_font()
for i in range(num_cells):
for t in spike_times[i][:]:
ax.vlines(t, i + .9, i + 1.1, color=color)
plt.ylim(0.8, num_cells+0.2)
plt.xlabel('t [s]')
plt.ylabel(ylabel)
plt.yticks(range(1,num_cells+1))
if title is not None:
ax.set_title(title)
plt.tight_layout()
return ax
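# --- Usage sketch (added illustration; not part of the original module) ---
# A minimal, hypothetical example of calling raster() with one spike-time array
# per cell; the synthetic spike times below are an assumption for demonstration.
def _example_raster():
    spike_times = [np.sort(np.random.uniform(0.0, 2.0, size=20))
                   for _ in range(5)]
    ax = raster(spike_times, ylabel="Cell Id", title="Example raster")
    plt.show()
    return ax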
def animate_imshow_plots(data,
dt = None,
figsize = (8,15),
cmap = cmaps.inferno,
colorbar = False,
remove_axes = True,
save_animation = False,
animation_name = "unnamed" ):
"""
Animate imshow plots
"""
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import axes3d
from mpl_toolkits.axes_grid1 import make_axes_locatable
import time
num_subplots = len(data)
plots = []
nStates = len(data[0]["t_points"])
dt = data[0]["t_points"][1] - data[0]["t_points"][0] if dt is None else dt
num_cols = 2 if num_subplots >= 2 else num_subplots
num_rows = int(np.ceil((num_subplots-1)/2.))+1
fig = plt.figure(figsize=figsize)
def init():
for i in range(num_subplots):
plots[i].set_data(data[i]["value"][0,:,:])
ttl.set_text("")
return plots, ttl
def animate(j):
for i in range(num_subplots):
plots[i].set_data(data[i]["value"][j,:,:])
# plots[i].autoscale()
t = j*dt
ttl.set_text("Time = " + str('%.2f' % (t,)) + "s")
return plots, ttl
ttl = plt.suptitle("",fontsize = 16)
extent=[data[0]["spatial_vec"][0],data[0]["spatial_vec"][-1],
data[0]["spatial_vec"][0],data[0]["spatial_vec"][-1]]
# ttl = plt.suptitle("",fontsize = 16, x = 0.45, horizontalalignment = "left")
#Plot stimulus
colspanStim = 2
ax = plt.subplot2grid((num_rows, num_cols),(0, 0), colspan = colspanStim)
if remove_axes: simpleaxis(ax)
ax.set_title(data[0]["type"])
ax.set_xlabel(r"$x(\theta)$")
ax.set_ylabel(r"$y(\theta)$")
plots.append(ax.imshow(data[0]["value"][0,:,:],
origin = "lower",cmap="gray", interpolation="none", extent = extent))
if(colorbar):
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(plots[-1], ax=ax, orientation='vertical', cax = cax)
k = 1
for i in range(1, num_rows):
for j in range(num_cols):
if(k>= num_subplots):
break
ax = plt.subplot2grid((num_rows, num_cols),(i, j))
if remove_axes: simpleaxis(ax)
ax.set_title(data[k]["type"])
ax.set_xlabel(r"$x(\theta)$")
ax.set_ylabel(r"$y(\theta)$")
plots.append(ax.imshow(data[k]["value"][0,:,:], cmap=cmap,
interpolation="none",
origin = "lower",
extent = extent,
vmin = (data[k]["value"]).min(),
vmax = (data[k]["value"]).max()))
if(colorbar):
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(plots[-1], ax=ax, orientation='vertical',cax = cax)
k+=1
plt.tight_layout()
plt.subplots_adjust(top=0.95)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=nStates, interval=20, blit=False)
if(save_animation):
anim.save(animation_name + ".mp4",fps=30, writer="avconv", codec="libx264")
plt.show()
return anim
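# --- Data-format sketch (illustrative assumption; not from the original code) ---
# Each entry handled by animate_imshow_plots() appears to be a dict with the keys
# "type", "value" (a t x ny x nx array), "t_points" and "spatial_vec". The entry
# built below is synthetic and is only meant to show the expected structure.
def _example_animate_imshow():
    t_points = np.linspace(0.0, 1.0, 30)
    spatial_vec = np.linspace(-1.0, 1.0, 32)
    entry = {"type": "Synthetic stimulus",
             "value": np.random.rand(len(t_points), len(spatial_vec), len(spatial_vec)),
             "t_points": t_points,
             "spatial_vec": spatial_vec}
    return animate_imshow_plots([entry, entry], dt=t_points[1] - t_points[0],
                                colorbar=True, remove_axes=False)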
def imshowPlotsOfImpulseResponses(data,
x_imshow=True,
y_imshow=True,
idx=0,
idy=0,
figsize=(14,8),
cmap=cmaps.viridis,
colorbar=True,
save_figure=False,
figure_name="unnamed"):
"""
Imshow plots of impulse response functions
"""
num_cols = len(data)
num_rows = int(x_imshow) + int(y_imshow)
fig, axarr = plt.subplots(num_rows, num_cols, figsize=figsize, sharey=True)
# pp.set_font()
# for j in range(num_cols):
# for i in range(num_rows):
# ax = axarr[i,j]
# pp.spines_edge_color(ax)
# pp.remove_ticks(ax)
if(num_rows == 1):
axarr = np.array(axarr).reshape(1,num_cols)
if(num_cols ==1):
axarr = np.array(axarr).reshape(num_rows,1)
levels = np.arange(-5., 5., 0.23)
for j in range(num_cols):
axarr[0,j].set_title(data[j]["type"])
i=0
if(x_imshow):
axarr[i,j].set_adjustable('box-forced')
extent=[data[j]["spatial_vec"][0],data[j]["spatial_vec"][-1],
data[j]["t_points"][0],data[j]["t_points"][-1]]
im = axarr[i,j].imshow(data[j]["value"][:,idy,:], extent= extent,
cmap=cmap, origin="lower", aspect="auto",
interpolation="gaussian")
axarr[i,j].contour(data[j]["value"][:,idy,:], levels,
colors='w',linewidths=0.4,
extent= extent, aspect="auto")
axarr[i,j].set_xlabel(r"$x(\theta)$")
axarr[i,0].set_ylabel(r"$\tau(s)$")
if(colorbar):
fig.colorbar(im, ax=axarr[i,j],orientation='horizontal')
i+=1
if(y_imshow):
axarr[i,j].set_adjustable('box-forced')
extent=[data[j]["spatial_vec"][0],data[j]["spatial_vec"][-1],
data[j]["t_points"][0],data[j]["t_points"][-1]]
im = axarr[i,j].imshow(data[j]["value"][:,:,idx], extent= extent,
cmap=cmap, origin="lower",aspect="auto",
interpolation="gaussian")
axarr[i,j].contour(data[j]["value"][:,:,idx], levels,
colors='w',linewidths=0.4,
extent= extent, aspect="auto")
axarr[i,j].set_xlabel(r"$y(\theta)$")
axarr[i,0].set_ylabel(r"$\tau(s)$")
if(colorbar):
fig.colorbar(im, ax=axarr[i,j], orientation='horizontal')
i+=1
if(save_figure):
fig.savefig(figure_name + ".svg")
plt.show()
def plot3dOfImpulseResponses(data,
x_3d=True,
y_3d=True,
num_skip = 2,
idx=0,
idy=0,
figsize=(15,10),
cmap=cmaps.inferno,
colorbar=False,
save_figure=False,
figure_name="unnamed"):
"""
3D plots of impulse response functions
"""
from mpl_toolkits.mplot3d import Axes3D
num_cols = len(data)
num_rows = int(x_3d) + int(y_3d)
fig = plt.figure(figsize=figsize)
for j in range(num_cols):
S = data[j]["spatial_vec"]
T = data[j]["t_points"]
T, S = np.meshgrid(S, T)
i=0
if(x_3d):
ax = plt.subplot2grid((num_rows, num_cols),(i,j), projection='3d')
surf = ax.plot_surface(S[::num_skip,::num_skip],
T[::num_skip,::num_skip],
data[j]["value"][::num_skip, idy, ::num_skip],
cmap=cmap, edgecolors="k", alpha=0.9, shade=False,
vmin = (data[j]["value"]).min(),
vmax = (data[j]["value"]).max(),
rstride=1, cstride=1, linewidth=0.0, antialiased=False)
ax.set_title(data[j]["type"])
ax.set_ylabel(r"$x(\theta)$")
ax.set_xlabel(r"$\tau(s)$")
ax.set_zlabel(r"$W$")
ax.set_zlim3d(np.min(data[j]["value"][::num_skip, idy, ::num_skip]),
np.max(data[j]["value"][::num_skip, idy, ::num_skip]))
ax.view_init(elev=46., azim=130)
if(colorbar):
cbar = plt.colorbar(surf, ax=ax, orientation='horizontal')
i+=1
if(y_3d):
ax = plt.subplot2grid((num_rows, num_cols),(i,j), projection='3d')
surf = ax.plot_surface(S[::num_skip,::num_skip],
T[::num_skip,::num_skip],
data[j]["value"][::num_skip, ::num_skip, idx],
cmap=cmap, edgecolors="k", alpha=0.9, shade=False,
vmin = (data[j]["value"]).min(),
vmax = (data[j]["value"]).max(),
rstride=1, cstride=1, linewidth=0.0, antialiased=False)
ax.set_title(data[j]["type"])
ax.set_ylabel(r"$y(\theta)$")
ax.set_xlabel(r"$\tau(s)$")
ax.set_zlabel(r"$W$")
ax.set_zlim3d(np.min(data[j]["value"][::num_skip, ::num_skip, idx]),
np.max(data[j]["value"][::num_skip, ::num_skip, idx]))
ax.view_init(elev=46., azim=130)
if(colorbar):
fig.colorbar(surf, ax=ax, orientation='horizontal')
i+=1
fig.tight_layout()
if(save_figure):
fig.savefig(figure_name + ".svg")
plt.show()
def line3dPlotsOfImpulseResponses(data,
x_line3d=True,
y_line3d=True,
num_skip = 10,
idx=64,
idy=64,
figsize = (14,8),
cmap = cmaps.inferno,
colorbar = False,
save_figure = False,
figure_name = "unnamed"):
"""
3D line plot of impulse response functions
"""
from mpl_toolkits.mplot3d import Axes3D
num_cols = len(data)
num_rows = int(x_line3d) + int(y_line3d)
fig = plt.figure(figsize=figsize)
for j in range(num_cols):
ids = range(0, len(data[j]["spatial_vec"]), num_skip)
i=0
if(x_line3d):
ax = plt.subplot2grid((num_rows, num_cols),(i,j), projection='3d')
for x in ids:
ax.plot(data[j]["t_points"],
data[j]["spatial_vec"][x]*np.ones(len(data[j]["t_points"])),
data[j]["value"][:,idy, x],
color=pp.colormap(0), linewidth=1.0)
ax.set_title(data[j]["type"])
ax.set_xlabel(r"$\tau(s)$")
ax.set_ylabel(r"$x(\theta)$")
ax.set_zlabel(r"$W(x, y_a, \tau)$")
# ax.view_init(elev=17., azim=128)
ax.set_xlim3d(data[j]["t_points"][0], data[j]["t_points"][-1])
ax.set_ylim3d(data[j]["spatial_vec"][0], data[j]["spatial_vec"][-1])
ax.set_zlim3d(np.min(data[j]["value"][:,idy, ids]),
np.max(data[j]["value"][:,idy, ids]))
#pretty plot functions
pp.spines_edge_color(ax)
pp.remove_ticks(ax)
pp.set_font()
i+=1
if(y_line3d):
ax = plt.subplot2grid((num_rows, num_cols),(i,j), projection='3d')
for y in ids:
ax.plot(data[j]["t_points"],
data[j]["spatial_vec"][y]*np.ones(len(data[j]["t_points"])),
data[j]["value"][:,y, idx],
"-b", linewidth=1.0)
ax.set_title(data[j]["type"])
ax.set_xlabel(r"$\tau(s)$")
ax.set_ylabel(r"$y(\theta)$")
ax.set_zlabel(r"$W(x_a, y, \tau)$")
# ax.view_init(elev=17., azim=128)
ax.set_xlim3d(data[j]["t_points"][0], data[j]["t_points"][-1])
ax.set_ylim3d(data[j]["spatial_vec"][0], data[j]["spatial_vec"][-1])
ax.set_zlim3d(np.min(data[j]["value"][:,ids, idx]),
np.max(data[j]["value"][:,ids, idx]))
i+=1
plt.tight_layout()
if(save_figure):
fig.savefig(figure_name + ".svg")
plt.show()
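# --- Usage sketch (illustrative assumption; not part of the original module) ---
# The impulse-response plotters above all take the same list-of-dict format as
# animate_imshow_plots(). The synthetic spatio-temporal kernel below is an
# assumption used only to show how imshowPlotsOfImpulseResponses() might be
# driven; plot3dOfImpulseResponses() and line3dPlotsOfImpulseResponses() accept
# the same structure. This assumes the older matplotlib environment the module
# was written for.
def _example_impulse_response_plots():
    t_points = np.linspace(0.0, 0.5, 40)
    spatial_vec = np.linspace(-2.0, 2.0, 64)
    x, y = np.meshgrid(spatial_vec, spatial_vec)
    kernel = (np.exp(-(x**2 + y**2))[np.newaxis, :, :]
              * np.exp(-t_points / 0.1)[:, np.newaxis, np.newaxis])
    entry = {"type": "Synthetic kernel", "value": kernel,
             "t_points": t_points, "spatial_vec": spatial_vec}
    imshowPlotsOfImpulseResponses([entry], idx=32, idy=32)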
if __name__ == "__main__":
import h5py
from glob import glob
import Simulation as sim
plt.close("all")
outputFile = "/home/milad/Dropbox/projects/lgn/code/lgn-simulator/apps/firingSynchrony/firingSynchrony.h5"
f = h5py.File(outputFile, "r")
exp = sim.Simulation(None, f)
Ns = exp.integrator.Ns
Nt = exp.integrator.Nt
# S = {"type" : "Stimulus",
# "value" : exp.stimulus.spatio_temporal(),
# "t_points" : exp.integrator.t_points,
# "spatial_vec" : exp.integrator.s_points
# }
# Wg = {"type" : "Ganglion",
# "value" : exp.ganglion.irf(),
# "t_points" : exp.integrator.t_points,
# "spatial_vec" : exp.integrator.s_points
# }
# Wr = {"type" : "Relay",
# "value" : exp.relay.irf(),
# "t_points" : exp.integrator.t_points,
# "spatial_vec" : exp.integrator.s_points
# }
# Wi = {"type" : "Interneuron",
# "value" : exp.interneuron.irf(),
# "t_points" : exp.integrator.t_points,
# "spatial_vec" : exp.integrator.s_points
# }
#
# Wc = {"type" : "Cortical",
# "value" : exp.cortical.irf(),
# "t_points" : exp.integrator.t_points,
# "spatial_vec" : exp.integrator.s_points
# }
# #
#
# Wr_ft = {"type" : "Relay",
# "value" : exp.relay.irf_ft().imag,
# "t_points" : exp.integrator.w_points,
# "spatial_vec" : exp.integrator.k_points
# }
# # # Response FT--------------------------------------------------------------------------
# S_ft = {"type" : "Stimulus",
# "value" : exp.stimulus.fourier_transform().real,
# "t_points" : exp.integrator.w_points,
# "spatial_vec" : exp.integrator.k_points
# }
# Rg_ft = {"type" : "Ganglion",
# "value" : np.real(exp.ganglion.resp_ft()),
# "t_points" : exp.integrator.w_points,
# "spatial_vec" : exp.integrator.k_points
# }
# Rr_ft = {"type" : "Relay",
# "value" : np.real(exp.relay.resp_ft()),
# "t_points" : exp.integrator.w_points,
# "spatial_vec" : exp.integrator.k_points
# }
# Ri_ft = {"type" : "Interneuron",
# "value" : np.real(exp.interneuron.resp_ft()),
# "t_points" : exp.integrator.w_points,
# "spatial_vec" : exp.integrator.k_points
# }
#
# Rc_ft = {"type" : "Cortical",
# "t_points" : exp.integrator.w_points,
# "value" : np.real(exp.cortical.resp_ft()),
# "spatial_vec" : exp.integrator.k_points
# }
# # Response --------------------------------------------------------------------------
# Rg = {"type" : "Ganglion",
# "value" : exp.ganglion.resp(),
# "t_points" : exp.integrator.t_points,
# "spatial_vec" : exp.integrator.s_points
# }
Rr = {"type" : "Relay",
"value" : exp.relay.resp(),
"t_points" : exp.integrator.t_points,
"spatial_vec" : exp.integrator.s_points
}
#
# Ri = {"type" : "Interneuron",
# "value" : exp.interneuron.resp(),
# "t_points" : exp.integrator.t_points,
# "spatial_vec" : exp.integrator.s_points
# }
# Rc = {"type" : "Cortical",
# "value" : exp.cortical.resp(),
# "t_points" : exp.integrator.t_points,
# "spatial_vec" : exp.integrator.s_points
# }
#############################################################################################
# data = [S, Rg_ft, Rr_ft, Ri_ft, Rc_ft]
#line3dPlotsOfImpulseResponses([Wg, Wr], num_skip = 30, idx=Ns/2, idy=Ns/2,y_line3d=False,)
# imshowPlotsOfImpulseResponses([Wg, Wr, Wi, Wc], idx=Ns/2, idy=Ns/2,y_imshow=False,)
# plt.figure()
# plt.plot(exp.integrator.t_points, exp.stimulus.spatio_temporal()[:, Ns/2,Ns/2], '-r', label="S")
# plt.plot(exp.integrator.t_points, exp.ganglion.resp()[:,Ns/2,Ns/2], '-g', label="G")
plt.plot(exp.integrator.t_points, exp.relay.resp()[:,Ns/2,Ns/2], '-b', label="R")
# plt.legend()
animate_imshow_plots([Rr, Rr], exp.integrator.dt,
colorbar = True, remove_axes = False,
save_animation = False, animation_name = "newTest")
# plt.figure()
# # plt.plot( exp.integrator.k_points,exp.stimulus.fourier_transform()[0,Ns/2,:], '-g', label="S_ft")
#
# plt.plot( exp.integrator.k_points,exp.ganglion.irf_ft()[0,Ns/2,:], '-r', label="irf_ft")
# plt.plot( exp.integrator.k_points,exp.ganglion.resp_ft()[0,Ns/2,:], '-b', label="resp_ft")
# plt.legend()
#
# plt.figure()
# plt.plot( exp.integrator.s_points,exp.ganglion.irf()[0,Ns/2,:], '-r', label="irf")
# plt.plot( exp.integrator.s_points,exp.ganglion.resp()[0,Ns/2,:], '-b', label="resp")
#
# plt.legend()
# plt.legend()
# plt.plot( exp.relay.irf()[:,1,1])
# print np.argmax(exp.relay.irf()[:,1,1])
# print np.argmin(exp.relay.irf()[:,1,1])
#plt.plot( exp.relay.irf()[:,2,2])
plt.show()
| gpl-3.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/texmanager.py | 2 | 22890 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \\*Agg backends: dvipng>=1.6
* PS backend: psfrag, dvips, and Ghostscript>=8.60
Backends:
* \\*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = ('\\TeX\\ is Number '
'$\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\\pi}}{2^n}$!')
Z = texmanager.get_rgba(s, fontsize=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file or include these two lines in
your script::
from matplotlib import rc
rc('text', usetex=True)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import copy
import glob
import os
import shutil
import sys
import warnings
from hashlib import md5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
from matplotlib.cbook import mkdirs, Locked
from matplotlib.compat.subprocess import subprocess, Popen, PIPE, STDOUT
import matplotlib.dviread as dviread
import re
DEBUG = False
if sys.platform.startswith('win'):
cmd_split = '&'
else:
cmd_split = ';'
@mpl.cbook.deprecated("2.1")
def dvipng_hack_alpha():
try:
p = Popen([str('dvipng'), '-version'], stdin=PIPE, stdout=PIPE,
stderr=STDOUT, close_fds=(sys.platform != 'win32'))
stdout, stderr = p.communicate()
except OSError:
mpl.verbose.report('No dvipng was found', 'helpful')
return False
lines = stdout.decode(sys.getdefaultencoding()).split('\n')
for line in lines:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s' % version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
mpl.verbose.report('Unexpected response from dvipng -version', 'helpful')
return False
class TexManager(object):
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None:
oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
cachedir = mpl.get_cachedir()
if cachedir is not None:
texcache = os.path.join(cachedir, 'tex.cache')
else:
# Should only happen in a restricted environment (such as Google App
# Engine). Deal with this gracefully by not creating a cache directory.
texcache = None
if os.path.exists(oldcache):
if texcache is not None:
try:
shutil.move(oldcache, texcache)
except IOError as e:
warnings.warn('File could not be renamed: %s' % e)
else:
warnings.warn("""\
Found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s".""" % (oldcache, texcache))
else:
warnings.warn("""\
Could not rename old TeX cache dir "%s": a suitable configuration
directory could not be found.""" % oldcache)
if texcache is not None:
mkdirs(texcache)
# mappable cache of
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', '\\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', '\\usepackage{mathptmx}'),
'palatino': ('ppl', '\\usepackage{mathpazo}'),
'zapf chancery': ('pzc', '\\usepackage{chancery}'),
'cursive': ('pzc', '\\usepackage{chancery}'),
'charter': ('pch', '\\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', '\\usepackage{helvet}'),
'avant garde': ('pag', '\\usepackage{avant}'),
'courier': ('pcr', '\\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = (('text.latex.preamble', ) +
tuple(['font.' + n for n in ('family', ) +
font_families]))
def __init__(self):
if self.texcache is None:
raise RuntimeError(
('Cannot create TexManager, as there is no cache directory '
'available'))
mkdirs(self.texcache)
ff = rcParams['font.family']
if len(ff) == 1 and ff[0].lower() in self.font_families:
self.font_family = ff[0].lower()
elif isinstance(ff, six.string_types) and ff.lower() in self.font_families:
self.font_family = ff.lower()
else:
mpl.verbose.report(
'font.family must be one of (%s) when text.usetex is True. '
'serif will be used by default.' %
', '.join(self.font_families),
'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in [(ff, ff.replace('-', '_'))
for ff in self.font_families]:
for font in rcParams['font.' + font_family]:
if font.lower() in self.font_info:
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print('family: %s, font: %s, info: %s' %
(font_family, font,
self.font_info[font.lower()]))
break
else:
if DEBUG:
print('%s font is not compatible with usetex' % font)
else:
mpl.verbose.report('No LaTeX-compatible font found for the '
'%s font family in rcParams. Using '
'default.' % font_family, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
# Add a hash of the latex preamble to self._fontconfig so that the
# correct png is selected for strings rendered with same font and dpi
# even if the latex preamble changes within the session
preamble_bytes = six.text_type(self.get_custom_preamble()).encode('utf-8')
fontconfig.append(md5(preamble_bytes).hexdigest())
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive':
cmd.append(self.cursive[1])
while '\\usepackage{type1cm}' in cmd:
cmd.remove('\\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join(['\\usepackage{type1cm}', cmd,
'\\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f' % fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = six.text_type(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict.fromkeys(self._rc_cache_keys)
changed = [par for par in self._rc_cache_keys
if rcParams[par] != self._rc_cache[par]]
if changed:
if DEBUG:
print('DEBUG following keys changed:', changed)
for k in changed:
if DEBUG:
print('DEBUG %-20s: %-10s -> %-10s' %
(k, self._rc_cache[k], rcParams[k]))
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG:
print('DEBUG RE-INIT\nold fontconfig:', self._fontconfig)
self.__init__()
if DEBUG:
print('DEBUG fontconfig:', self._fontconfig)
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex' % basefile
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif': r'{\sffamily %s}',
'monospace': r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\\usepackage{ucs}
\\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = """\\documentclass{article}
%s
%s
%s
\\usepackage[papersize={72in,72in},body={70in,70in},margin={1in,1in}]{geometry}
\\pagestyle{empty}
\\begin{document}
\\fontsize{%f}{%f}%s
\\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize * 1.25, tex)
with open(texfile, 'wb') as fh:
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s.encode('ascii'))
except UnicodeEncodeError as err:
mpl.verbose.report("You are using unicode and latex, but "
"have not enabled the matplotlib "
"'text.latex.unicode' rcParam.",
'helpful')
raise
return texfile
_re_vbox = re.compile(
r"MatplotlibBox:\(([\d.]+)pt\+([\d.]+)pt\)x([\d.]+)pt")
def make_tex_preview(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific
font size. It uses the preview.sty to determine the dimension
(width, height, descent) of the output.
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex' % basefile
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif': r'{\sffamily %s}',
'monospace': r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\\usepackage{ucs}
\\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
# newbox, setbox, immediate, etc. are used to find the box
# extent of the rendered text.
s = """\\documentclass{article}
%s
%s
%s
\\usepackage[active,showbox,tightpage]{preview}
\\usepackage[papersize={72in,72in},body={70in,70in},margin={1in,1in}]{geometry}
%% we override the default showbox as it is treated as an error and makes
%% the exit status not zero
\\def\\showbox#1{\\immediate\\write16{MatplotlibBox:(\\the\\ht#1+\\the\\dp#1)x\\the\\wd#1}}
\\begin{document}
\\begin{preview}
{\\fontsize{%f}{%f}%s}
\\end{preview}
\\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize * 1.25, tex)
with open(texfile, 'wb') as fh:
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s.encode('ascii'))
except UnicodeEncodeError as err:
mpl.verbose.report("You are using unicode and latex, but "
"have not enabled the matplotlib "
"'text.latex.unicode' rcParam.",
'helpful')
raise
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
if rcParams['text.latex.preview']:
return self.make_dvi_preview(tex, fontsize)
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi' % basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
command = [str("latex"), "-interaction=nonstopmode",
os.path.basename(texfile)]
mpl.verbose.report(command, 'debug')
with Locked(self.texcache):
try:
report = subprocess.check_output(command,
cwd=self.texcache,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
('LaTeX was not able to process the following '
'string:\n%s\n\n'
'Here is the full report generated by LaTeX:\n%s '
'\n\n' % (repr(tex.encode('unicode_escape')),
exc.output.decode("utf-8"))))
mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile + '*'):
if fname.endswith('dvi'):
pass
elif fname.endswith('tex'):
pass
else:
try:
os.remove(fname)
except OSError:
pass
return dvifile
def make_dvi_preview(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex
string. It calls the make_tex_preview() method and stores the size
information (width, height, descent) in a separate file.
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi' % basefile
baselinefile = '%s.baseline' % basefile
if (DEBUG or not os.path.exists(dvifile) or
not os.path.exists(baselinefile)):
texfile = self.make_tex_preview(tex, fontsize)
command = [str("latex"), "-interaction=nonstopmode",
os.path.basename(texfile)]
mpl.verbose.report(command, 'debug')
try:
report = subprocess.check_output(command,
cwd=self.texcache,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
('LaTeX was not able to process the following '
'string:\n%s\n\n'
'Here is the full report generated by LaTeX:\n%s '
'\n\n' % (repr(tex.encode('unicode_escape')),
exc.output.decode("utf-8"))))
mpl.verbose.report(report, 'debug')
# find the box extent information in the latex output
# file and store them in ".baseline" file
m = TexManager._re_vbox.search(report.decode("utf-8"))
with open(basefile + '.baseline', "w") as fh:
fh.write(" ".join(m.groups()))
for fname in glob.glob(basefile + '*'):
if fname.endswith('dvi'):
pass
elif fname.endswith('tex'):
pass
elif fname.endswith('baseline'):
pass
else:
try:
os.remove(fname)
except OSError:
pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png' % basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
command = [str("dvipng"), "-bg", "Transparent", "-D", str(dpi),
"-T", "tight", "-o", os.path.basename(pngfile),
os.path.basename(dvifile)]
mpl.verbose.report(command, 'debug')
try:
report = subprocess.check_output(command,
cwd=self.texcache,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
('dvipng was not able to process the following '
'string:\n%s\n\n'
'Here is the full report generated by dvipng:\n%s '
'\n\n' % (repr(tex.encode('unicode_escape')),
exc.output.decode("utf-8"))))
mpl.verbose.report(report, 'debug')
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf' % basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
command = [str("dvips"), "-q", "-E", "-o",
os.path.basename(psfile),
os.path.basename(dvifile)]
mpl.verbose.report(command, 'debug')
try:
report = subprocess.check_output(command,
cwd=self.texcache,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
('dvips was not able to process the following '
'string:\n%s\n\n'
'Here is the full report generated by dvips:\n%s '
'\n\n' % (repr(tex.encode('unicode_escape')),
exc.output.decode("utf-8"))))
mpl.verbose.report(report, 'debug')
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
with open(psfile) as ps:
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s' % psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
self.grey_arrayd[key] = alpha = X[:, :, -1]
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0, 0, 0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize:
fontsize = rcParams['font.size']
if not dpi:
dpi = rcParams['savefig.dpi']
r, g, b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), float)
Z[:, :, 0] = r
Z[:, :, 1] = g
Z[:, :, 2] = b
Z[:, :, 3] = alpha
self.rgba_arrayd[key] = Z
return Z
def get_text_width_height_descent(self, tex, fontsize, renderer=None):
"""
return width, height and descent of the text.
"""
if tex.strip() == '':
return 0, 0, 0
if renderer:
dpi_fraction = renderer.points_to_pixels(1.)
else:
dpi_fraction = 1.
if rcParams['text.latex.preview']:
# use preview.sty
basefile = self.get_basefile(tex, fontsize)
baselinefile = '%s.baseline' % basefile
if DEBUG or not os.path.exists(baselinefile):
dvifile = self.make_dvi_preview(tex, fontsize)
with open(baselinefile) as fh:
l = fh.read().split()
height, depth, width = [float(l1) * dpi_fraction for l1 in l]
return width, height + depth, depth
else:
# use dviread. It sometimes returns a wrong descent.
dvifile = self.make_dvi(tex, fontsize)
with dviread.Dvi(dvifile, 72 * dpi_fraction) as dvi:
page = next(iter(dvi))
# A total height (including the descent) needs to be returned.
return page.width, page.height + page.descent, page.descent
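# --- Usage sketch (illustrative; not part of matplotlib itself) ---
# A minimal, hypothetical example of driving TexManager directly: render a TeX
# string to a cached PNG and fetch its RGBA array. Assumes a working LaTeX and
# dvipng installation, as required by this module.
def _texmanager_example():
    texmanager = TexManager()
    tex = r"$\displaystyle\sum_{n=1}^\infty 2^{-n}$"
    pngfile = texmanager.make_png(tex, fontsize=12, dpi=100)
    rgba = texmanager.get_rgba(tex, fontsize=12, dpi=100, rgb=(0, 0, 1))
    return pngfile, rgba.shape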
| mit |
jzt5132/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
georgyberdyshev/ascend | pygtk/matplotlib-example/pygladematplotlib.py | 1 | 9904 | import sys
import matplotlib
matplotlib.use('GTK')
from matplotlib.figure import Figure
from matplotlib.axes import Subplot
from matplotlib.backends.backend_gtk import FigureCanvasGTK, NavigationToolbar
from matplotlib.numerix import arange, sin, pi
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
host = "***"
user = "***"
passwd = "***"
db = "***"
class appGui:
def __init__(self):
gladefile = "project2.glade"
self.windowname = "gtkbench"
self.wTree = gtk.glade.XML(gladefile, self.windowname)
self.win = self.wTree.get_widget("gtkbench")
self.win.maximize()
dic = {"on_window1_destroy" : gtk.main_quit,
"on_button1_clicked" : self.submitDB,
"on_button3_clicked" : self.fillTree,
"on_notebook1_switch_page" : self.selectNotebookPage,
"on_treeview1_button_press_event" : self.clickTree,
"on_button2_clicked" : self.createProjectGraph
}
self.wTree.signal_autoconnect(dic)
# start with database selection
self.wDialog = gtk.glade.XML("project2.glade", "dbSelector")
# setup matplotlib stuff on first notebook page (empty graph)
self.figure = Figure(figsize=(6,4), dpi=72)
self.axis = self.figure.add_subplot(111)
self.axis.set_xlabel('Yepper')
self.axis.set_ylabel('Flabber')
self.axis.set_title('An Empty Graph')
self.axis.grid(True)
self.canvas = FigureCanvasGTK(self.figure) # a gtk.DrawingArea
self.canvas.show()
self.graphview = self.wTree.get_widget("vbox1")
self.graphview.pack_start(self.canvas, True, True)
# setup listview for database
self.listview = self.wTree.get_widget("treeview1")
self.listmodel = gtk.ListStore(str, int, int, str, str)
self.listview.set_model(self.listmodel)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Name",renderer, text=0)
column.set_clickable(True)
column.set_sort_column_id(0)
column.connect("clicked", self.createDBGraph)
column.set_resizable(True)
self.listview.append_column(column)
#renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Age",renderer, text=1)
column.set_clickable(True)
column.set_sort_column_id(1)
column.connect("clicked", self.createDBGraph)
column.set_resizable(True)
self.listview.append_column(column)
#self.listview.show()
column = gtk.TreeViewColumn("Shoesize",renderer, text=2)
column.set_clickable(True)
column.set_sort_column_id(2)
column.connect("clicked", self.createDBGraph)
column.set_resizable(True)
self.listview.append_column(column)
#self.listview.show()
column = gtk.TreeViewColumn("Created",renderer, text=3)
column.set_clickable(True)
column.set_sort_column_id(3)
column.connect("clicked", self.createDBGraph)
column.set_resizable(True)
self.listview.append_column(column)
#self.listview.show()
#renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Updated",renderer, text=4)
column.set_clickable(True)
column.set_sort_column_id(4)
column.connect("clicked", self.createDBGraph)
column.set_resizable(True)
self.listview.append_column(column)
return
# callbacks.
def submitDB(self, widget):
while True:
try:
name = self.wTree.get_widget("entry1").get_text()
age = self.wTree.get_widget("entry2").get_text()
size = self.wTree.get_widget("entry3").get_text()
assert name != ""
assert age != ""
assert size != ""
dataUsr = name, age, size
sd = DBStuff.Eb_db(host, user, passwd, db)
sd.subMit(dataUsr)
break
except AssertionError:
self.wDialog = gtk.glade.XML("project2.glade", "dbWarningEmpty")
close = self.wDialog.get_widget("dbWarningEmpty")
response = close.run()
if response == gtk.RESPONSE_CLOSE:
close.destroy()
break
except DBStuff.MySQLdb.IntegrityError:
def callback():
self.wDialog = gtk.glade.XML("project2.glade", "dbWarningOverwrite")
close = self.wDialog.get_widget("dbWarningOverwrite")
response = close.run()
if response == gtk.RESPONSE_CANCEL:
close.destroy()
if response == gtk.RESPONSE_OK:
sd.delRow(name)
wd = DBStuff.Eb_db(host, user, passwd, db)
wd.subMit(dataUsr)
close.destroy()
callback()
break
def fillTree(self, widget):
model = self.listmodel
self.listmodel.clear()
fg = DBStuff.Eb_db(host, user, passwd, db)
fg.getData("Age")
for i in range(len(fg.data)):
# note: all data from table "bench" is appended, but that is something you don't want
# possible solution: create a seperate table for the listbox (eg. ommit Timestamp, keep it in another table)
self.listmodel.append(fg.data[i])
self.createDBGraph(self)
def selectNotebookPage(self, widget, notebookpage, page_number):
# if database page is selected (nr. 1 for now!), retrieve data
if page_number == 1:
self.fillTree(self)
if page_number == 0:
print "clicked first tab"
def clickTree(self, treeview, event):
if event.button == 3:
x = int(event.x)
y = int(event.y)
time = event.time
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo != None:
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor( path, col, 0)
self.wDialog = gtk.glade.XML("project2.glade", "treeRightClick")
close = self.wDialog.get_widget("treeRightClick")
response = close.run()
if response == gtk.RESPONSE_OK:
close.destroy()
print x
print y
print pthinfo
#self.popup.popup( None, None, None, event.button, time)
return 1
def createProjectGraph(self, widget):
while True:
try:
# empty axis if neccesary, and reset title and stuff
self.axis.clear()
self.axis.set_xlabel('Yepper')
self.axis.set_ylabel('Flabber')
self.axis.set_title('A Graph')
self.axis.grid(True)
# get data
age = self.wTree.get_widget("entry2").get_text()
size = self.wTree.get_widget("entry3").get_text()
age != ""
size != ""
N = 1
ind = arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
p1 = self.axis.bar(ind, int(age), width, color='r')
p2 = self.axis.bar(ind+width, int(size), width, color='y')
self.axis.legend((p1[0], p2[0]), ("Age", "Size"), shadow = True)
#self.axis.set_xticks(ind+width, ('G1') )
self.axis.set_xlim(-width,len(ind))
self.canvas.destroy()
self.canvas = FigureCanvasGTK(self.figure) # a gtk.DrawingArea
self.canvas.show()
self.graphview = self.wTree.get_widget("vbox1")
self.graphview.pack_start(self.canvas, True, True)
break
except ValueError:
self.wDialog = gtk.glade.XML("project2.glade", "cannotCreateProjGraph")
close = self.wDialog.get_widget("cannotCreateProjGraph")
response = close.run()
if response == gtk.RESPONSE_OK:
close.destroy()
break
def createDBGraph(self, widget):
self.axis.clear()
self.axis.set_xlabel('Samples (n)')
self.axis.set_ylabel('Value (-)')
self.axis.set_title('Another Graph (click on the columnheader to sort)')
self.axis.grid(True)
# get columns from listmodel
age = []
for row in self.listmodel:
age.append(row[1])
size = []
for row in self.listmodel:
size.append(row[2])
# get number of rows
N = len(age)
ind = arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
p1 = self.axis.bar(ind, age, width, color='b')
p2 = self.axis.bar(ind+width, size, width, color='r')
# destroy graph if it already exists
while True:
try:
self.canvas2.destroy()
break
except:
print "nothing to destroy"
break
self.canvas2 = FigureCanvasGTK(self.figure) # a gtk.DrawingArea
self.canvas2.show()
self.graphview = self.wTree.get_widget("vbox2")
self.graphview.pack_start(self.canvas2, True, True)
app = appGui()
gtk.main()
| gpl-2.0 |
astropy/astropy | astropy/visualization/wcsaxes/tests/test_grid_paths.py | 6 | 1050 | import numpy as np
import pytest
from matplotlib.lines import Path
from astropy.visualization.wcsaxes.grid_paths import get_lon_lat_path
@pytest.mark.parametrize('step_in_degrees', [10, 1, 0.01])
def test_round_trip_visibility(step_in_degrees):
zero = np.zeros(100)
# The pixel values are irrelevant for this test
pixel = np.stack([zero, zero]).T
# Create a grid line of constant latitude with a point every step
line = np.stack([np.arange(100), zero]).T * step_in_degrees
# Create a modified grid line where the point spacing is larger by 5%
# Starting with point 20, the discrepancy between `line` and `line_round` is greater than `step`
line_round = line * 1.05
# Perform the round-trip check
path = get_lon_lat_path(line, pixel, line_round)
# The grid line should be visible for only the initial part line (19 points)
codes_check = np.full(100, Path.MOVETO)
codes_check[line_round[:, 0] - line[:, 0] < step_in_degrees] = Path.LINETO
assert np.all(path.codes[1:] == codes_check[1:])
| bsd-3-clause |
jreback/pandas | pandas/tests/indexes/interval/test_setops.py | 1 | 8030 | import numpy as np
import pytest
from pandas import Index, IntervalIndex, Timestamp, interval_range
import pandas._testing as tm
def monotonic_index(start, end, dtype="int64", closed="right"):
return IntervalIndex.from_breaks(np.arange(start, end, dtype=dtype), closed=closed)
def empty_index(dtype="int64", closed="right"):
return IntervalIndex(np.array([], dtype=dtype), closed=closed)
class TestIntervalIndex:
def test_union(self, closed, sort):
index = monotonic_index(0, 11, closed=closed)
other = monotonic_index(5, 13, closed=closed)
expected = monotonic_index(0, 13, closed=closed)
result = index[::-1].union(other, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
result = other[::-1].union(index, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
tm.assert_index_equal(index.union(index, sort=sort), index)
tm.assert_index_equal(index.union(index[:1], sort=sort), index)
def test_union_empty_result(self, closed, sort):
# GH 19101: empty result, same dtype
index = empty_index(dtype="int64", closed=closed)
result = index.union(index, sort=sort)
tm.assert_index_equal(result, index)
# GH 19101: empty result, different numeric dtypes -> common dtype is f8
other = empty_index(dtype="float64", closed=closed)
result = index.union(other, sort=sort)
expected = other
tm.assert_index_equal(result, expected)
other = index.union(index, sort=sort)
tm.assert_index_equal(result, expected)
other = empty_index(dtype="uint64", closed=closed)
result = index.union(other, sort=sort)
tm.assert_index_equal(result, expected)
result = other.union(index, sort=sort)
tm.assert_index_equal(result, expected)
def test_intersection(self, closed, sort):
index = monotonic_index(0, 11, closed=closed)
other = monotonic_index(5, 13, closed=closed)
expected = monotonic_index(5, 11, closed=closed)
result = index[::-1].intersection(other, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
result = other[::-1].intersection(index, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
tm.assert_index_equal(index.intersection(index, sort=sort), index)
# GH 26225: nested intervals
index = IntervalIndex.from_tuples([(1, 2), (1, 3), (1, 4), (0, 2)])
other = IntervalIndex.from_tuples([(1, 2), (1, 3)])
expected = IntervalIndex.from_tuples([(1, 2), (1, 3)])
result = index.intersection(other)
tm.assert_index_equal(result, expected)
# GH 26225
index = IntervalIndex.from_tuples([(0, 3), (0, 2)])
other = IntervalIndex.from_tuples([(0, 2), (1, 3)])
expected = IntervalIndex.from_tuples([(0, 2)])
result = index.intersection(other)
tm.assert_index_equal(result, expected)
# GH 26225: duplicate nan element
index = IntervalIndex([np.nan, np.nan])
other = IntervalIndex([np.nan])
expected = IntervalIndex([np.nan])
result = index.intersection(other)
tm.assert_index_equal(result, expected)
def test_intersection_empty_result(self, closed, sort):
index = monotonic_index(0, 11, closed=closed)
# GH 19101: empty result, same dtype
other = monotonic_index(300, 314, closed=closed)
expected = empty_index(dtype="int64", closed=closed)
result = index.intersection(other, sort=sort)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different numeric dtypes -> common dtype is float64
other = monotonic_index(300, 314, dtype="float64", closed=closed)
result = index.intersection(other, sort=sort)
expected = other[:0]
tm.assert_index_equal(result, expected)
other = monotonic_index(300, 314, dtype="uint64", closed=closed)
result = index.intersection(other, sort=sort)
tm.assert_index_equal(result, expected)
def test_intersection_duplicates(self):
# GH#38743
index = IntervalIndex.from_tuples([(1, 2), (1, 2), (2, 3), (3, 4)])
other = IntervalIndex.from_tuples([(1, 2), (2, 3)])
expected = IntervalIndex.from_tuples([(1, 2), (2, 3)])
result = index.intersection(other)
tm.assert_index_equal(result, expected)
def test_difference(self, closed, sort):
index = IntervalIndex.from_arrays([1, 0, 3, 2], [1, 2, 3, 4], closed=closed)
result = index.difference(index[:1], sort=sort)
expected = index[1:]
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
# GH 19101: empty result, same dtype
result = index.difference(index, sort=sort)
expected = empty_index(dtype="int64", closed=closed)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(
index.left.astype("float64"), index.right, closed=closed
)
result = index.difference(other, sort=sort)
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self, closed, sort):
index = monotonic_index(0, 11, closed=closed)
result = index[1:].symmetric_difference(index[:-1], sort=sort)
expected = IntervalIndex([index[0], index[-1]])
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
# GH 19101: empty result, same dtype
result = index.symmetric_difference(index, sort=sort)
expected = empty_index(dtype="int64", closed=closed)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(
index.left.astype("float64"), index.right, closed=closed
)
result = index.symmetric_difference(other, sort=sort)
expected = empty_index(dtype="float64", closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"op_name", ["union", "intersection", "difference", "symmetric_difference"]
)
def test_set_incompatible_types(self, closed, op_name, sort):
index = monotonic_index(0, 11, closed=closed)
set_op = getattr(index, op_name)
# TODO: standardize return type of non-union setops type(self vs other)
# non-IntervalIndex
if op_name == "difference":
expected = index
else:
expected = getattr(index.astype("O"), op_name)(Index([1, 2, 3]))
result = set_op(Index([1, 2, 3]), sort=sort)
tm.assert_index_equal(result, expected)
# mixed closed
msg = (
"can only do set operations between two IntervalIndex objects "
"that are closed on the same side and have compatible dtypes"
)
for other_closed in {"right", "left", "both", "neither"} - {closed}:
other = monotonic_index(0, 11, closed=other_closed)
with pytest.raises(TypeError, match=msg):
set_op(other, sort=sort)
# GH 19016: incompatible dtypes
other = interval_range(Timestamp("20180101"), periods=9, closed=closed)
msg = (
"can only do set operations between two IntervalIndex objects "
"that are closed on the same side and have compatible dtypes"
)
with pytest.raises(TypeError, match=msg):
set_op(other, sort=sort)
| bsd-3-clause |
jlegendary/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/table.py | 69 | 16757 | """
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <[email protected]>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import division
import warnings
import artist
from artist import Artist
from patches import Rectangle
from cbook import is_string_like
from text import Text
from transforms import Bbox
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor,
)
self.set_clip_on(False)
# Create text object
if loc is None: loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
def draw(self, renderer):
if not self.get_visible(): return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
Currently supports 'left', 'center' and 'right'.
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l,b,w,h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best' : 0,
'upper right' : 1, # default
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'center left' : 5,
'center right' : 6,
'lower center' : 7,
'upper center' : 8,
'center' : 9,
'top right' : 10,
'top left' : 11,
'bottom left' : 12,
'bottom right' : 13,
'right' : 14,
'left' : 15,
'top' : 16,
'bottom' : 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on bottom; valid locations are\n%s\t' %(loc, '\n\t'.join(self.codes.keys())))
loc = 'bottom'
if is_string_like(loc): loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0,0)
cell = Cell(xy, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
def _approx_text_height(self):
return self.FONTSIZE/72.0*self.figure.dpi/self._axes.bbox.height * 1.2
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible(): return
renderer.open_group('table')
self._update_positions(renderer)
keys = self._cells.keys()
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
#for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x,mouseevent.y),{}
else:
return False,{}
def get_children(self):
'Return the Artists contained by the table'
return self._cells.values()
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [c.get_window_extent(renderer) for c in self._cells]
return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in self._cells.iteritems():
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = widths.keys()
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = heights.keys()
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in self._cells.iteritems():
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = self._cells.values()[0].get_fontsize()
cells = []
for key, cell in self._cells.iteritems():
# ignore auto-sized columns
if key[1] in self._autoColumns: continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in self._cells.itervalues():
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in self._cells.itervalues():
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in self._cells.itervalues():
cell.set_fontsize(size)
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in self._cells.itervalues():
x, y = c.get_x(), c.get_y()
c.set_x(x+ox)
c.set_y(y+oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l,b,w,h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw/w, rh/h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = range(len(self.codes))
# defaults for center
ox = (0.5-w/2)-l
oy = (0.5-h/2)-b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5-w/2)-l
if self._loc in (CL, CR, C): # center y
oy = (0.5-h/2)-b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None)
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
        cellText = [[''] * cols] * rows
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
assert len(row) == cols
if cellColours is not None:
assert len(cellColours) == rows
for row in cellColours:
assert len(row) == cols
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0/cols] * cols
# Check row and column labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
            rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
assert len(rowLabels) == rows
offset = 0
if colLabels is None:
if colColours is not None:
            colLabels = [''] * cols
offset = 1
elif colColours is None:
colColours = 'w' * cols
offset = 1
    if colLabels is not None:
        assert len(colLabels) == cols
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox)
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row+offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row+offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
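# Hedged usage sketch (added for illustration; not part of the original
# module): it exercises the table() factory defined above. The pyplot import
# and the example cell data are assumptions, not values taken from this file.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    demo_fig = plt.figure()
    demo_ax = demo_fig.add_subplot(111)
    demo_ax.set_axis_off()
    table(demo_ax,
          cellText=[['1', '2'], ['3', '4']],
          rowLabels=['row1', 'row2'],
          colLabels=['col1', 'col2'],
          loc='center')
    plt.show()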
artist.kwdocd['Table'] = artist.kwdoc(Table)
| gpl-3.0 |
phoebe-project/phoebe2-docs | 2.3/examples/extinction_eclipse_depth_v_teff.py | 2 | 6679 | #!/usr/bin/env python
# coding: utf-8
# Extinction: Eclipse Depth Difference as Function of Temperature
# ============================
#
# In this example, we'll reproduce Figure 3 in the extinction release paper ([Jones et al. 2020](http://phoebe-project.org/publications/2020Jones+)).
#
# **NOTE**: this script takes a long time to run.
#
# <img src="jones+20_fig3.png" alt="Figure 3" width="800px"/>
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[ ]:
#!pip install -I "phoebe>=2.3,<2.4"
# As always, let's do imports and initialize a logger and a new bundle.
# In[1]:
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
from matplotlib import gridspec
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[3]:
from astropy.table import Table
# In[4]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger('error')
b = phoebe.default_binary()
# First we'll define the system parameters
# In[5]:
b['period@orbit']=10*u.d
b['teff@secondary']=5780.*u.K
b['requiv@secondary']=1.0*u.solRad
b.flip_constraint('mass@primary', solve_for='sma@binary')
b.flip_constraint('mass@secondary', solve_for='q')
# And then create two light curve datasets at the same times, but in different passbands
# In[6]:
times = phoebe.linspace(0, 10, 301)
b.add_dataset('lc', times=times, dataset='B', passband="Johnson:B")
b.add_dataset('lc', times=times, dataset='R', passband="Cousins:R")
# Now we'll set some atmosphere and limb-darkening options
# In[7]:
b.set_value_all('gravb_bol', 0.0)
b.set_value_all('ld_mode', 'manual')
b.set_value_all('ld_func', 'linear')
b.set_value_all('ld_coeffs', [0.0])
# And flip the extinction constraint so we can provide E(B-V).
# In[9]:
b.flip_constraint('ebv', solve_for='Av')
# In[10]:
masses=np.array([ 0.6 , 0.7 , 0.8 , 0.9 , 1. , 1.1 , 1.2 , 1.3 , 1.4 ,
1.5 , 1.6 , 1.7 , 1.8 , 1.9 , 1.95, 2. , 2.1 , 2.2 ,
2.3 , 2.5 , 3. , 3.5 , 4. , 4.5 , 5. , 6. , 7. ,
8. , 10. , 12. , 15. , 20. ])
temps=np.array([ 4285., 4471., 4828., 5242.,
5616., 5942., 6237., 6508.,
6796., 7121., 7543., 7968.,
8377., 8759., 8947., 9130.,
9538., 9883., 10155., 10801.,
12251., 13598., 14852., 16151.,
17092., 19199., 21013., 22526.,
25438., 27861., 30860., 34753.])
radii=np.array([0.51, 0.63, 0.72, 0.80, 0.90,
1.01, 1.13, 1.26, 1.36, 1.44,
1.48, 1.51, 1.54, 1.57, 1.59,
1.61, 1.65, 1.69, 1.71, 1.79,
1.97, 2.14, 2.30, 2.48, 2.59,
2.90, 3.17, 3.39, 3.87, 4.29,
4.85, 5.69])
t=Table(names=('Mass','Tdiff','B1','B2','R1','R2'), dtype=('f4', 'f4', 'f8', 'f8', 'f8', 'f8'))
# In[11]:
def binmodel(teff,requiv,mass):
b.set_value('teff', component='primary', value=teff*u.K)
b.set_value('requiv', component='primary', value=requiv*u.solRad)
b.set_value('mass', component='primary', value=mass*u.solMass)
b.set_value('mass', component='secondary', value=1.0*u.solMass)
b.set_value('ebv', value=0.0)
b.run_compute(distortion_method='rotstar', irrad_method='none', model='noext', overwrite=True)
b.set_value('ebv', value=1.0)
b.run_compute(distortion_method='rotstar', irrad_method='none', model='ext', overwrite=True)
Bextmags=-2.5*np.log10(b['value@fluxes@B@ext@model'])
Bnoextmags=-2.5*np.log10(b['value@fluxes@B@noext@model'])
Bdiff=(Bextmags-Bextmags.min())-(Bnoextmags-Bnoextmags.min())
Rextmags=-2.5*np.log10(b['value@fluxes@R@ext@model'])
Rnoextmags=-2.5*np.log10(b['value@fluxes@R@noext@model'])
Rdiff=(Rextmags-Rextmags.min())-(Rnoextmags-Rnoextmags.min())
tdiff=teff-5780
t.add_row((mass, tdiff, Bdiff[0],Bdiff[150],Rdiff[0],Rdiff[150]))
# In[12]:
def binmodel_teff(teff):
b.set_value('teff', component='primary', value=teff*u.K)
b.set_value('ebv', value=0.0)
b.run_compute(distortion_method='rotstar', irrad_method='none', model='noext', overwrite=True)
b.set_value('ebv', value=1.0)
b.run_compute(distortion_method='rotstar', irrad_method='none', model='ext', overwrite=True)
Bextmags=-2.5*np.log10(b['value@fluxes@B@ext@model'])
Bnoextmags=-2.5*np.log10(b['value@fluxes@B@noext@model'])
Bdiff=(Bextmags-Bextmags.min())-(Bnoextmags-Bnoextmags.min())
Rextmags=-2.5*np.log10(b['value@fluxes@R@ext@model'])
Rnoextmags=-2.5*np.log10(b['value@fluxes@R@noext@model'])
Rdiff=(Rextmags-Rextmags.min())-(Rnoextmags-Rnoextmags.min())
tdiff=teff-5780
t_teff.add_row((tdiff, Bdiff[0],Bdiff[150],Rdiff[0],Rdiff[150]))
# In[13]:
# NOTE: this loop takes a long time to run
for i in range(0,len(masses)):
binmodel(temps[i], radii[i], masses[i])
#t.write("Extinction_G2V_ZAMS.dat", format='ascii', overwrite=True)
# In[15]:
#t=Table.read("Extinction_G2V_ZAMS.dat", format='ascii')
plt.clf()
plt.plot(t['Tdiff'],t['B1'],color="b",ls="-", label="G2V eclipsed")
plt.plot(t['Tdiff'],t['B2'],color="b",ls="--", label="Secondary eclipsed")
plt.plot(t['Tdiff'],t['R1'],color="r",ls="-", label="")
plt.plot(t['Tdiff'],t['R2'],color="r",ls="--", label="")
plt.ylabel(r'$\Delta m$ ')
plt.xlabel(r'$T_\mathrm{secondary} - T_\mathrm{G2V}$')
plt.legend()
plt.xlim([-1450,25000])
# In[14]:
t_teff=Table(names=('Tdiff','B1','B2','R1','R2'), dtype=('f4', 'f8', 'f8', 'f8', 'f8'))
b.set_value('requiv', component='primary', value=1.0*u.solRad)
b.set_value('mass', component='primary', value=1.0*u.solMass)
b.set_value('mass', component='secondary', value=1.0*u.solMass)
# NOTE: this loop takes a long time to run
for i in range(0,len(temps)):
binmodel_teff(temps[i])
#t_teff.write("Extinction_Solar_exceptTeff_test.dat", format='ascii', overwrite=True)
# In[16]:
#t_teff=Table.read("Extinction_Solar_exceptTeff_test.dat", format='ascii')
plt.clf()
plt.plot(t_teff['Tdiff'],t_teff['B1'],color="b",ls="-", label="G2V eclipsed")
plt.plot(t_teff['Tdiff'],t_teff['B2'],color="b",ls="--", label="Secondary eclipsed")
plt.plot(t_teff['Tdiff'],t_teff['R1'],color="r",ls="-", label="")
plt.plot(t_teff['Tdiff'],t_teff['R2'],color="r",ls="--", label="")
plt.ylabel(r'$\Delta m$ ')
plt.xlabel(r'$T_\mathrm{secondary} - T_\mathrm{G2V}$')
plt.legend()
plt.xlim([-1450,25000])
# In[ ]:
| gpl-3.0 |
PanDAWMS/panda-server | pandaserver/dataservice/DDMHandler.py | 1 | 1988 | '''
master handler for DDM
'''
import re
import threading
from pandaserver.dataservice.Finisher import Finisher
from pandaserver.dataservice.Activator import Activator
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
# logger
_logger = PandaLogger().getLogger('DDMHandler')
class DDMHandler (threading.Thread):
# constructor
def __init__(self,taskBuffer,vuid,site=None,dataset=None,scope=None):
threading.Thread.__init__(self)
self.vuid = vuid
self.taskBuffer = taskBuffer
self.site = site
self.scope = scope
self.dataset = dataset
# main
def run(self):
# get logger
tmpLog = LogWrapper(_logger,'<vuid={0} site={1} name={2}>'.format(self.vuid,
self.site,
self.dataset))
# query dataset
tmpLog.debug("start")
if self.vuid is not None:
dataset = self.taskBuffer.queryDatasetWithMap({'vuid':self.vuid})
else:
dataset = self.taskBuffer.queryDatasetWithMap({'name':self.dataset})
if dataset is None:
tmpLog.error("Not found")
tmpLog.debug("end")
return
tmpLog.debug("type:%s name:%s" % (dataset.type,dataset.name))
if dataset.type == 'dispatch':
# activate jobs in jobsDefined
Activator(self.taskBuffer,dataset).start()
if dataset.type == 'output':
if dataset.name is not None and re.search('^panda\..*_zip$',dataset.name) is not None:
# start unmerge jobs
Activator(self.taskBuffer,dataset,enforce=True).start()
else:
# finish transferring jobs
Finisher(self.taskBuffer,dataset,site=self.site).start()
tmpLog.debug("end")
| apache-2.0 |
scls19fr/morse-talk | morse_talk/plot.py | 1 | 1932 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A Morse Binary plot
Copyright (C) 2015 by
Sébastien Celles <[email protected]>
All rights reserved.
"""
import matplotlib.pyplot as plt
from morse_talk.encoding import _encode_binary
def _create_ax(ax):
"""
    Create a Matplotlib Axes from ax.
    If ax is None, a new Matplotlib figure and Axes are created
    and the new Axes is returned;
    otherwise ax is returned unchanged.
"""
if ax is None:
_, axs = plt.subplots(1, 1)
return axs
else:
return ax
def _create_x_y(l, duration=1):
"""
    Create two lists from a list of bits:
    x: time (in units of one dot (dit))
    y: bits
>>> l = [1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1]
>>> x, y = _create_x_y(l)
>>> x
[-1, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28]
>>> y
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0]
"""
l = [0] + l + [0]
y = []
x = []
for i, bit in enumerate(l):
y.append(bit)
y.append(bit)
x.append((i - 1) * duration)
x.append(i * duration)
return x, y
def plot(message, duration=1, ax = None):
"""
    Plot a message as a binary (on/off) signal over time.
    Returns: ax, a Matplotlib Axes
"""
lst_bin = _encode_binary(message)
x, y = _create_x_y(lst_bin, duration)
ax = _create_ax(ax)
ax.plot(x, y, linewidth=2.0)
delta_y = 0.1
ax.set_ylim(-delta_y, 1 + delta_y)
ax.set_yticks([0, 1])
delta_x = 0.5 * duration
ax.set_xlim(-delta_x, len(lst_bin) * duration + delta_x)
return ax
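# Hedged usage sketch (not part of the original module): encodes and plots an
# arbitrary example message; the text "MORSE" is only a placeholder choice.
def _demo():
    ax = plot("MORSE", duration=1)
    ax.set_title("MORSE")
    plt.show()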
def main():
import doctest
doctest.testmod()
if __name__ == '__main__':
main()
| gpl-2.0 |
ptrendx/mxnet | example/rcnn/symdata/loader.py | 11 | 8759 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
from symdata.anchor import AnchorGenerator, AnchorSampler
from symdata.image import imdecode, resize, transform, get_image, tensor_vstack
def load_test(filename, short, max_size, mean, std):
# read and transform image
im_orig = imdecode(filename)
im, im_scale = resize(im_orig, short, max_size)
height, width = im.shape[:2]
im_info = mx.nd.array([height, width, im_scale])
# transform into tensor and normalize
im_tensor = transform(im, mean, std)
# for 1-batch inference purpose, cannot use batchify (or nd.stack) to expand dims
im_tensor = mx.nd.array(im_tensor).expand_dims(0)
im_info = mx.nd.array(im_info).expand_dims(0)
    # transform cv2 BGR image to RGB for matplotlib
im_orig = im_orig[:, :, (2, 1, 0)]
return im_tensor, im_info, im_orig
def generate_batch(im_tensor, im_info):
"""return batch"""
data = [im_tensor, im_info]
data_shapes = [('data', im_tensor.shape), ('im_info', im_info.shape)]
data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)
return data_batch
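# Hedged usage sketch (not part of the original file): shows how load_test and
# generate_batch fit together for single-image inference. The file name and
# the mean/std normalization constants are placeholders, not values taken
# from this repository.
def _example_single_image_batch(filename='demo.jpg'):
    im_tensor, im_info, im_orig = load_test(filename, short=600, max_size=1000,
                                            mean=(123.68, 116.779, 103.939),
                                            std=(1.0, 1.0, 1.0))
    return generate_batch(im_tensor, im_info)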
class TestLoader(mx.io.DataIter):
def __init__(self, roidb, batch_size, short, max_size, mean, std):
super(TestLoader, self).__init__()
# save parameters as properties
self._roidb = roidb
self._batch_size = batch_size
self._short = short
self._max_size = max_size
self._mean = mean
self._std = std
# infer properties from roidb
self._size = len(self._roidb)
self._index = np.arange(self._size)
# decide data and label names (only for training)
self._data_name = ['data', 'im_info']
self._label_name = None
# status variable
self._cur = 0
self._data = None
self._label = None
# get first batch to fill in provide_data and provide_label
self.next()
self.reset()
@property
def provide_data(self):
return [(k, v.shape) for k, v in zip(self._data_name, self._data)]
@property
def provide_label(self):
return None
def reset(self):
self._cur = 0
def iter_next(self):
return self._cur + self._batch_size <= self._size
def next(self):
if self.iter_next():
data_batch = mx.io.DataBatch(data=self.getdata(), label=self.getlabel(),
pad=self.getpad(), index=self.getindex(),
provide_data=self.provide_data, provide_label=self.provide_label)
self._cur += self._batch_size
return data_batch
else:
raise StopIteration
def getdata(self):
indices = self.getindex()
im_tensor, im_info = [], []
for index in indices:
roi_rec = self._roidb[index]
b_im_tensor, b_im_info, _ = get_image(roi_rec, self._short, self._max_size, self._mean, self._std)
im_tensor.append(b_im_tensor)
im_info.append(b_im_info)
im_tensor = mx.nd.array(tensor_vstack(im_tensor, pad=0))
im_info = mx.nd.array(tensor_vstack(im_info, pad=0))
self._data = im_tensor, im_info
return self._data
def getlabel(self):
return None
def getindex(self):
cur_from = self._cur
cur_to = min(cur_from + self._batch_size, self._size)
return np.arange(cur_from, cur_to)
def getpad(self):
return max(self._cur + self.batch_size - self._size, 0)
class AnchorLoader(mx.io.DataIter):
def __init__(self, roidb, batch_size, short, max_size, mean, std,
feat_sym, anchor_generator: AnchorGenerator, anchor_sampler: AnchorSampler,
shuffle=False):
super(AnchorLoader, self).__init__()
# save parameters as properties
self._roidb = roidb
self._batch_size = batch_size
self._short = short
self._max_size = max_size
self._mean = mean
self._std = std
self._feat_sym = feat_sym
self._ag = anchor_generator
self._as = anchor_sampler
self._shuffle = shuffle
# infer properties from roidb
self._size = len(roidb)
self._index = np.arange(self._size)
# decide data and label names
self._data_name = ['data', 'im_info', 'gt_boxes']
self._label_name = ['label', 'bbox_target', 'bbox_weight']
# status variable
self._cur = 0
self._data = None
self._label = None
# get first batch to fill in provide_data and provide_label
self.next()
self.reset()
@property
def provide_data(self):
return [(k, v.shape) for k, v in zip(self._data_name, self._data)]
@property
def provide_label(self):
return [(k, v.shape) for k, v in zip(self._label_name, self._label)]
def reset(self):
self._cur = 0
if self._shuffle:
np.random.shuffle(self._index)
def iter_next(self):
return self._cur + self._batch_size <= self._size
def next(self):
if self.iter_next():
data_batch = mx.io.DataBatch(data=self.getdata(), label=self.getlabel(),
pad=self.getpad(), index=self.getindex(),
provide_data=self.provide_data, provide_label=self.provide_label)
self._cur += self._batch_size
return data_batch
else:
raise StopIteration
def getdata(self):
indices = self.getindex()
im_tensor, im_info, gt_boxes = [], [], []
for index in indices:
roi_rec = self._roidb[index]
b_im_tensor, b_im_info, b_gt_boxes = get_image(roi_rec, self._short, self._max_size, self._mean, self._std)
im_tensor.append(b_im_tensor)
im_info.append(b_im_info)
gt_boxes.append(b_gt_boxes)
im_tensor = mx.nd.array(tensor_vstack(im_tensor, pad=0))
im_info = mx.nd.array(tensor_vstack(im_info, pad=0))
gt_boxes = mx.nd.array(tensor_vstack(gt_boxes, pad=-1))
self._data = im_tensor, im_info, gt_boxes
return self._data
def getlabel(self):
im_tensor, im_info, gt_boxes = self._data
        # all stacked images share the same anchors
_, out_shape, _ = self._feat_sym.infer_shape(data=im_tensor.shape)
feat_height, feat_width = out_shape[0][-2:]
anchors = self._ag.generate(feat_height, feat_width)
        # assign anchors according to the real image size encoded in im_info
label, bbox_target, bbox_weight = [], [], []
for batch_ind in range(im_info.shape[0]):
b_im_info = im_info[batch_ind].asnumpy()
b_gt_boxes = gt_boxes[batch_ind].asnumpy()
b_im_height, b_im_width = b_im_info[:2]
b_label, b_bbox_target, b_bbox_weight = self._as.assign(anchors, b_gt_boxes, b_im_height, b_im_width)
b_label = b_label.reshape((feat_height, feat_width, -1)).transpose((2, 0, 1)).flatten()
b_bbox_target = b_bbox_target.reshape((feat_height, feat_width, -1)).transpose((2, 0, 1))
b_bbox_weight = b_bbox_weight.reshape((feat_height, feat_width, -1)).transpose((2, 0, 1))
label.append(b_label)
bbox_target.append(b_bbox_target)
bbox_weight.append(b_bbox_weight)
label = mx.nd.array(tensor_vstack(label, pad=-1))
bbox_target = mx.nd.array(tensor_vstack(bbox_target, pad=0))
bbox_weight = mx.nd.array(tensor_vstack(bbox_weight, pad=0))
self._label = label, bbox_target, bbox_weight
return self._label
def getindex(self):
cur_from = self._cur
cur_to = min(cur_from + self._batch_size, self._size)
return np.arange(cur_from, cur_to)
def getpad(self):
return max(self._cur + self.batch_size - self._size, 0)
| apache-2.0 |
dsquareindia/scikit-learn | examples/model_selection/plot_precision_recall.py | 23 | 6873 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision does not necessarily decrease as
recall increases. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
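# Worked numeric check of the formulas above (added for illustration, not part
# of the original example): with 8 true positives, 2 false positives and
# 4 false negatives, P = 8/10 = 0.8, R = 8/12 = 2/3 and F1 = 2PR/(P+R) ~ 0.727.
_tp, _fp, _fn = 8.0, 2.0, 4.0
_precision = _tp / (_tp + _fp)
_recall = _tp / (_tp + _fn)
_f1 = 2 * _precision * _recall / (_precision + _recall)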
import matplotlib.pyplot as plt
import numpy as np
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
lw = 2
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average Precision-Recall curve and its area
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], lw=lw, color='navy',
label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class and iso-f1 curves
plt.clf()
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=lw)
lines.append(l)
labels.append('micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=lw)
lines.append(l)
labels.append('Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
fig = plt.gcf()
fig.set_size_inches(7, 7)
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.figlegend(lines, labels, loc='lower center')
plt.show()
| bsd-3-clause |
kperkins411/morphing_faces | visualize.py | 3 | 2399 | #!/usr/bin/env python
"""
Interactive face generator
Depends on:
* numpy 1.7
* matplotlib
"""
from morpher import Morpher
from matplotlib import pyplot
if __name__ == "__main__":
# Initialize face generator
morpher = Morpher()
# Make sure I/O doesn't hang when displaying the image
pyplot.ion()
# Build visualization window
fig = pyplot.figure(figsize=(1, 1), dpi=300)
im = pyplot.imshow(X=morpher.generate_face(),
interpolation='nearest',
cmap='gray')
pyplot.axis('off')
def onmove(event):
width, height = fig.canvas.get_width_height()
x = 2 * event.x / float(width) - 1
y = 2 * event.y / float(height) - 1
morpher.set_coordinates(x, y)
im.set_array(morpher.generate_face())
pyplot.draw()
def onclick(event):
morpher.toggle_freeze()
fig.canvas.mpl_connect('motion_notify_event', onmove)
fig.canvas.mpl_connect('button_press_event', onclick)
pyplot.show()
# Program loop
quit = False
help_message = (
"\n" +
"Move your mouse over the image to make it morph, click it to\n" +
"freeze / unfreeze the image.\n" +
"\n" +
"COMMANDS:\n" +
" h Display this help message\n" +
" q Quit the program\n" +
" r Randomize face\n" +
" d D1 D2 Select dimensions D1 and D2\n"
)
print help_message
while not quit:
command_args = raw_input('Type a command: ').strip().split(" ")
if command_args[0] == 'h':
print help_message
elif command_args[0] == 'q':
quit = True
elif command_args[0] == 'r':
morpher.shuffle()
im.set_array(morpher.generate_face())
pyplot.draw()
elif command_args[0] == 'd':
if len(command_args) < 3:
print" ERROR: need two dimensions to select"
else:
try:
arg0 = int(command_args[1])
arg1 = int(command_args[2])
morpher.select_dimensions(arg0, arg1)
except ValueError:
print " ERROR: Dimensions must be integers in [0, 28]"
except KeyError:
print " ERROR: Dimensions must be integers in [0, 28]"
| gpl-2.0 |
mhue/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 12 | 10796 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
CartoDB/crankshaft | release/python/0.8.2/crankshaft/test/test_clustering_kmeans.py | 6 | 2722 | import unittest
import numpy as np
from helper import fixture_file
from crankshaft.clustering import Kmeans
from crankshaft.analysis_data_provider import AnalysisDataProvider
import crankshaft.clustering as cc
from crankshaft import random_seeds
import json
from collections import OrderedDict
class FakeDataProvider(AnalysisDataProvider):
def __init__(self, mocked_result):
self.mocked_result = mocked_result
def get_spatial_kmeans(self, query):
return self.mocked_result
def get_nonspatial_kmeans(self, query):
return self.mocked_result
class KMeansTest(unittest.TestCase):
"""Testing class for k-means spatial"""
def setUp(self):
self.cluster_data = json.loads(
open(fixture_file('kmeans.json')).read())
self.params = {"subquery": "select * from table",
"no_clusters": "10"}
def test_kmeans(self):
"""
"""
data = [{'xs': d['xs'],
'ys': d['ys'],
'ids': d['ids']} for d in self.cluster_data]
random_seeds.set_random_seeds(1234)
kmeans = Kmeans(FakeDataProvider(data))
clusters = kmeans.spatial('subquery', 2)
labels = [a[1] for a in clusters]
c1 = [a for a in clusters if a[1] == 0]
c2 = [a for a in clusters if a[1] == 1]
self.assertEqual(len(np.unique(labels)), 2)
self.assertEqual(len(c1), 20)
self.assertEqual(len(c2), 20)
class KMeansNonspatialTest(unittest.TestCase):
"""Testing class for k-means non-spatial"""
def setUp(self):
self.params = {"subquery": "SELECT * FROM TABLE",
"n_clusters": 5}
def test_kmeans_nonspatial(self):
"""
test for k-means non-spatial
"""
# data from:
# http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn-cluster-kmeans
data_raw = [OrderedDict([("arr_col1", [1, 1, 1, 4, 4, 4]),
("arr_col2", [2, 4, 0, 2, 4, 0]),
("rowid", [1, 2, 3, 4, 5, 6])])]
random_seeds.set_random_seeds(1234)
kmeans = Kmeans(FakeDataProvider(data_raw))
clusters = kmeans.nonspatial('subquery', ['col1', 'col2'], 2)
cl1 = clusters[0][0]
cl2 = clusters[3][0]
for idx, val in enumerate(clusters):
if idx < 3:
self.assertEqual(val[0], cl1)
else:
self.assertEqual(val[0], cl2)
# raises exception for no data
with self.assertRaises(Exception):
kmeans = Kmeans(FakeDataProvider([]))
kmeans.nonspatial('subquery', ['col1', 'col2'], 2)
| bsd-3-clause |
pmorissette/bt | tests/test_core.py | 1 | 94648 | from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
from bt.core import FixedIncomeStrategy, HedgeSecurity, FixedIncomeSecurity
from bt.core import CouponPayingSecurity, CouponPayingHedgeSecurity
from bt.core import is_zero
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree1():
# Create a regular strategy
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c1
assert p['c1'] != c2
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
# Create a new parent strategy with a child sub-strategy
m = Node('m', children=[p, c1])
p = m['p']
mc1 = m['c1']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 2
assert 'p' in m.children
assert 'c1' in m.children
assert mc1 != c1
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
assert m == p.root
assert m == c1.root
assert m == c2.root
# Add a new node into the strategy
c0 = Node('c0', parent=p)
c0 = p['c0']
assert 'c0' in p.children
assert p == c0.parent
assert m == c0.root
assert len(p.children) == 3
# Add a new sub-strategy into the parent strategy
p2 = Node( 'p2', children = [c0, c1], parent=m )
p2 = m['p2']
c0 = p2['c0']
c1 = p2['c1']
assert 'p2' in m.children
assert p2.parent == m
assert len(p2.children) == 2
assert 'c0' in p2.children
assert 'c1' in p2.children
assert c0 != p['c0']
assert c1 != p['c1']
assert p2 == c0.parent
assert p2 == c1.parent
assert m == p2.root
assert m == c0.root
assert m == c1.root
def test_node_tree2():
# Just like test_node_tree1, but using the dictionary constructor
c = Node('template')
p = Node('p', children={'c1':c, 'c2':c, 'c3':'', 'c4':''})
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c
assert p['c1'] != c
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert c1.name == 'c1'
assert c2.name == 'c2'
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
def test_node_tree3():
c1 = Node('c1')
c2 = Node('c1') # Same name!
raised = False
try:
p = Node('p', children=[c1, c2, 'c3', 'c4'])
except ValueError:
raised = True
assert raised
raised = False
try:
p = Node('p', children=['c1', 'c1'])
except ValueError:
raised = True
assert raised
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
raised = False
try:
Node('c1', parent = p )
except ValueError:
raised = True
assert raised
# This does not raise, as it's just providing an implementation of 'c3',
# which had been declared earlier
c3 = Node('c3', parent = p )
assert 'c3' in p.children
def test_integer_positions():
c1 = Node('c1')
c2 = Node('c2')
c1.integer_positions = False
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
p.use_integer_positions(False)
assert not p.integer_positions
assert not c1.integer_positions
assert not c2.integer_positions
c3 = Node('c3', parent=p)
c3 = p['c3']
assert not c3.integer_positions
p2 = Node( 'p2', children = [p] )
p = p2['p']
c1 = p['c1']
c2 = p['c2']
assert p2.integer_positions
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
s.update(dts[0])
assert s.flows[ dts[0] ] == 1000
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.loc[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.loc[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.loc[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_lazy():
# A mix of test_strategybase_universe and test_strategybase_allocate
# to make sure that assets with lazy_add work correctly.
c1 = SecurityBase('c1', multiplier=2, lazy_add=True, )
c2 = FixedIncomeSecurity('c2', lazy_add=True)
s = StrategyBase('s', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
s.adjust(1000)
s.allocate(100, 'c1')
s.allocate(100, 'c2')
c1 = s['c1']
c2 = s['c2']
assert c1.multiplier == 2
assert isinstance( c2, FixedIncomeSecurity)
def test_strategybase_close():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
s.close('c1')
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_flatten():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
s.allocate(100, 'c2')
c2 = s['c2']
assert c1.position == 1
assert c1.value == 100
assert c2.position == 1
assert c2.value == 100
assert s.value == 1000
s.flatten()
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_multiple_calls():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
c2 = s['c2']
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1 == s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
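# The manual update/run/update sequence repeated above can also be written as
# a loop; this is only an illustrative sketch using the same StrategyBase
# calls as the test (with run replaced by the algo), not how bt drives a
# real backtest.
def _run_manually(strategy, dates):
    for dt in dates:
        strategy.update(dt)      # mark to market at dt
        strategy.run(strategy)   # apply the (replaced) run logic
        strategy.update(dt)      # refresh values after trading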
def test_strategybase_multiple_calls_preset_secs():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('s', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 2
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_no_post_update():
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 999
assert s.capital == 49
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 999
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1049
assert s.capital == 49
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1049.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1047
assert s.capital == 2
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1047
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1102
assert s.capital == 2
assert c1.value == 1100
assert c1.weight == 1100.0 / 1102
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1096
assert s.capital == 51
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1096
assert c2.price == 95
def test_strategybase_prices():
dts = pd.date_range('2010-01-01', periods=21)
rawd = [13.555, 13.75, 14.16, 13.915, 13.655,
13.765, 14.02, 13.465, 13.32, 14.65,
14.59, 14.175, 13.865, 13.865, 13.89,
13.85, 13.565, 13.47, 13.225, 13.385,
12.89]
data = pd.DataFrame(index=dts, data=rawd, columns=['a'])
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
s.setup(data)
# buy 100 shares on day 1 - hold until end
# just enough to buy 100 shares + $1 commission
s.adjust(1356.50)
s.update(dts[0])
# allocate all capital to child a
# a should be dynamically created and should have
# 100 shares allocated. s.capital should be 0
s.allocate(s.value, 'a')
assert s.capital == 0
assert s.value == 1355.50
assert len(s.children) == 1
aae(s.price, 99.92628, 5)
a = s['a']
assert a.position == 100
assert a.value == 1355.50
assert a.weight == 1
assert a.price == 13.555
assert len(a.prices) == 1
# update through all dates and make sure price is ok
s.update(dts[1])
aae(s.price, 101.3638, 4)
s.update(dts[2])
aae(s.price, 104.3863, 4)
s.update(dts[3])
aae(s.price, 102.5802, 4)
# finish updates and make sure ok at end
for i in range(4, 21):
s.update(dts[i])
assert len(s.prices) == 21
aae(s.prices[-1], 95.02396, 5)
aae(s.prices[-2], 98.67306, 5)
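# Rough sketch of the price-index arithmetic checked above: after the initial
# purchase the strategy price starts near 100 and compounds with the value
# return at each step (this ignores capital adjustments, which this test does
# not make after the initial one).
def _next_price(prev_price, prev_value, value):
    return prev_price * (value / prev_value)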
def test_fail_if_root_value_negative():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
s.adjust(-100)
# trigger update
s.update(dts[0])
assert s.bankrupt
# make sure only triggered if root negative
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(-100)
s.update(dts[0])
# now make it trigger
c1.adjust(-1000)
# trigger update
s.update(dts[0])
assert s.bankrupt
def test_fail_if_0_base_in_return_calc():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
# must set up a tree because otherwise the negative root error pops up first
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(100)
s.update(dts[0])
c1.adjust(-100)
s.update(dts[1])
try:
c1.adjust(-100)
s.update(dts[1])
assert False
except ZeroDivisionError as e:
if 'Could not update' not in str(e):
assert False
def test_strategybase_tree_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1', update=True)
assert s.root.stale == True
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
# Check that rebalance with update=False
# does not mark the node as stale
s.rebalance(0.6, 'c1', update=False)
assert s.root.stale == False
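# Sketch of why rebalancing 50% of 1000 buys only 4 shares above: the target
# value is 500, but with the $1 fixed commission five shares would cost 501,
# so the integer quantity is reduced until the full outlay fits the target.
# This illustrates the observed behaviour, not bt's actual sizing code.
def _max_integer_shares(target, price, fixed_fee):
    q = int(target // price)
    while q > 0 and q * price + fixed_fee > target:
        q -= 1
    return q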
def test_strategybase_tree_decimal_position_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.use_integer_positions(False)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000.2)
s.rebalance(0.42, 'c1')
s.rebalance(0.58, 'c2')
aae(c1.value, 420.084)
aae(c2.value, 580.116)
aae(c1.value + c2.value, 1000.2)
def test_rebalance_child_not_in_tree():
s = StrategyBase('p')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
# rebalance to 0 w/ child that is not present - should ignore
s.rebalance(0, 'c2')
assert s.value == 1000
assert s.capital == 1000
assert len(s.children) == 0
def test_strategybase_tree_rebalance_to_0():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
# now rebalance c1
s.rebalance(0, 'c1')
assert c1.position == 0
assert c1.value == 0
assert s.capital == 1000
assert s.value == 1000
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_rebalance_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now rebalance child s1 - since its children are 0, no waterfall alloc
m.rebalance(0.5, 's1')
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
s1.rebalance(0.4, 'c1')
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
# now rebalance child s1 again and make sure c1 also gets proportional
# increase
m.rebalance(0.8, 's1')
assert s1.value == 800
aae(m.capital, 200, 1)
assert m.value == 1000
assert s1.weight == 800 / 1000
assert s2.weight == 0
assert c1.value == 300.0
assert c1.weight == 300.0 / 800
assert c1.position == 3
# now rebalance child s1 to 0 - should close out s1 and c1 as well
m.rebalance(0, 's1')
assert s1.value == 0
assert m.capital == 1000
assert m.value == 1000
assert s1.weight == 0
assert s2.weight == 0
assert c1.weight == 0
def test_strategybase_tree_rebalance_base():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# check that 2 rebalances of equal weight lead to two different allocs
# since value changes after first call
s.rebalance(0.5, 'c1')
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2')
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
# close out everything
s.flatten()
# adjust to get back to 1000
s.adjust(4)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance but set fixed base
base = s.value
s.rebalance(0.5, 'c1', base=base)
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2', base=base)
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
def test_algo_stack():
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# no run_always for now
del a1.run_always
del a2.run_always
del a3.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert not a3.called
# now test that run_always marked are run
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# a3 will have run_always
del a1.run_always
del a2.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert a3.called
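# Hedged re-sketch (not bt's actual implementation) of the AlgoStack
# behaviour exercised above: algos run in order, ordinary algos stop being
# called after the first False, but algos flagged run_always are still
# invoked, and the stack's return value stays False.
def _algo_stack_sketch(target, algos):
    ok = True
    for algo in algos:
        if ok or getattr(algo, 'run_always', False):
            res = algo(target)
            ok = ok and res
    return ok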
def test_set_commissions():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.set_commissions(lambda x, y: 1.0)
s.setup(data)
s.update(dts[0])
s.adjust(1000)
s.allocate(500, 'c1')
assert s.capital == 599
s.set_commissions(lambda x, y: 0.0)
s.allocate(-400, 'c1')
assert s.capital == 999
def test_strategy_tree_proper_return_calcs():
s1 = StrategyBase('s1')
s2 = StrategyBase('s2')
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.loc[dts[1], 'c1'] = 105
data.loc[dts[1], 'c2'] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert m.price == 100
assert s1.value == 0
assert s2.value == 0
# now allocate directly to child
s1.allocate(500)
assert m.capital == 500
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.weight == 0
# allocate to child2 via parent method
m.allocate(500, 's2')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000
assert s2.price == 100
# now allocate and incur commission fee
s1.allocate(500, 'c1')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000.0
assert s2.price == 100
def test_strategy_tree_proper_universes():
def do_nothing(x):
return True
child1 = Strategy('c1', [do_nothing], ['b', 'c'])
parent = Strategy('m', [do_nothing], [child1, 'a'])
child1 = parent['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(
{'a': pd.Series(data=1, index=dts, name='a'),
'b': pd.Series(data=2, index=dts, name='b'),
'c': pd.Series(data=3, index=dts, name='c')})
parent.setup(data, test_data1 = 'test1')
assert len(parent.children) == 1
assert 'c1' in parent.children
assert len(parent._universe.columns) == 2
assert 'c1' in parent._universe.columns
assert 'a' in parent._universe.columns
assert len(child1._universe.columns) == 2
assert 'b' in child1._universe.columns
assert 'c' in child1._universe.columns
assert parent._has_strat_children
assert len(parent._strat_children) == 1
assert parent.get_data( 'test_data1' ) == 'test1'
# New child strategy with parent (and using dictionary notation)
child2 = Strategy('c2', [do_nothing], {'a' : SecurityBase(''), 'b' : ''}, parent=parent)
# Setup the child from the parent, but pass in some additional data
child2.setup_from_parent(test_data2 = 'test2')
assert 'a' in child2._universe.columns
assert 'b' in child2._universe.columns
assert 'c2' in parent._universe.columns
# Make sure child has data from the parent and the additional data
assert child2.get_data('test_data1') == 'test1'
assert child2.get_data('test_data2') == 'test2'
assert len(parent._strat_children) == 2
def test_strategy_tree_paper():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['a'], data=100.)
data['a'].loc[dts[1]] = 101
data['a'].loc[dts[2]] = 102
s = Strategy('s',
[bt.algos.SelectWhere(data > 100),
bt.algos.WeighEqually(),
bt.algos.Rebalance()])
m = Strategy('m', [], [s])
s = m['s']
m.setup(data)
m.update(dts[0])
m.run()
assert m.price == 100
assert s.price == 100
assert s._paper_trade
assert s._paper.price == 100
s.update(dts[1])
m.run()
assert m.price == 100
assert m.value == 0
assert s.value == 0
assert s.price == 100
s.update(dts[2])
m.run()
assert m.price == 100
assert m.value == 0
assert s.value == 0
assert np.allclose(s.price, 100. * (102 / 101.))
def test_dynamic_strategy():
def do_nothing(x):
return True
# Start with an empty parent
parent = Strategy('p', [do_nothing], [])
dts = pd.date_range('2010-01-01', periods=4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[2]] = 105.
data['c2'][dts[2]] = 95.
parent.setup( data )
# NOTE: Price of the sub-strategy won't be correct in this example because
# we are not using the algo stack to impact weights, and so the paper
# trading strategy does not see the same actions as we are doing.
i = 0
parent.adjust( 1e6 )
parent.update( dts[i] )
assert parent.price == 100.
assert parent.value == 1e6
i = 1
parent.update( dts[i] )
# On this step, we decide to put a trade on c1 vs c2 and track it as a strategy
trade = Strategy('c1_vs_c2', [], children = ['c1', 'c2'], parent = parent )
trade.setup_from_parent()
trade.update( parent.now )
assert trade.price == 100.
assert trade.value == 0
# Allocate capital to the trade
parent.allocate( 1e5, trade.name )
assert trade.value == 1e5
assert trade.price == 100.
# Go long 'c1' and short 'c2'
trade.rebalance( 1., 'c1')
trade.rebalance( -1., 'c2')
assert parent.universe[ trade.name ][ dts[i] ] == 100.
assert parent.positions['c1'][ dts[i] ] == 1e3
assert parent.positions['c2'][ dts[i] ] == -1e3
i = 2
parent.update( dts[i] )
assert trade.value == 1e5 + 10 * 1e3
assert parent.value == 1e6 + 10 * 1e3
# On this step, we close the trade, and allocate capital back to the parent
trade.flatten()
trade.update( trade.now ) # Need to update after flattening (for now)
parent.allocate( -trade.capital, trade.name )
assert trade.value == 0
assert trade.capital == 0
assert parent.value == 1e6 + 10 * 1e3
assert parent.capital == parent.value
assert parent.positions['c1'][ dts[i] ] == 0.
assert parent.positions['c2'][ dts[i] ] == 0.
i = 3
parent.update( dts[i] )
# Just make sure we can update one step beyond closing
# Note that "trade" is still a child of parent, and it also has children,
# so it will keep getting updated (and paper trading will still happen).
assert trade.value == 0
assert trade.capital == 0
assert trade.values[ dts[i] ] == 0.
def test_dynamic_strategy2():
# Start with an empty parent
parent = Strategy('p', [], [])
dts = pd.date_range('2010-01-01', periods=4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[2]] = 105.
data['c2'][dts[2]] = 95.
data['c1'][dts[3]] = 101.
data['c2'][dts[3]] = 99.
parent.setup( data )
i = 0
parent.adjust( 1e6 )
parent.update( dts[i] )
assert parent.price == 100.
assert parent.value == 1e6
i = 1
parent.update( dts[i] )
# On this step, we decide to put a trade on c1 vs c2 and track it as a strategy
def trade_c1_vs_c2( strategy ):
if strategy.now == dts[1]:
strategy.rebalance( 1., 'c1')
strategy.rebalance( -1., 'c2')
trade = Strategy('c1_vs_c2', [trade_c1_vs_c2], children = ['c1', 'c2'], parent = parent )
trade.setup_from_parent()
trade.update( parent.now )
assert trade.price == 100.
assert trade.value == 0
# Allocate capital to the trade
parent.allocate( 1e5, trade.name )
assert trade.value == 1e5
assert trade.price == 100.
# Run the strategy for the timestep
parent.run()
assert parent.universe[ trade.name ][ dts[i] ] == 100.
assert np.isnan( parent.universe[ trade.name ][ dts[0] ] )
assert parent.positions['c1'][ dts[i] ] == 1e3
assert parent.positions['c2'][ dts[i] ] == -1e3
i = 2
parent.update( dts[i] )
trade = parent[ trade.name ]
assert trade.value == 1e5 + 10 * 1e3
assert parent.value == 1e6 + 10 * 1e3
aae( trade.price, 110.)
# Next we close the trade by flattening positions
trade.flatten()
trade.update( trade.now ) # Need to update after flattening (for now)
aae( trade.price, 110.)
# Finally we allocate capital back to the parent to be re-deployed
parent.allocate( -trade.capital, trade.name )
assert trade.value == 0
assert trade.capital == 0
aae( trade.price, 110.) # Price stays the same even after capital de-allocated
assert parent.value == 1e6 + 10 * 1e3
assert parent.capital == parent.value
assert parent.positions['c1'][ dts[i] ] == 0.
assert parent.positions['c2'][ dts[i] ] == 0.
i = 3
parent.update( dts[i] )
# Just make sure we can update one step beyond closing
assert parent.value == 1e6 + 10 * 1e3
# Note that "trade" is still a child of parent, and it also has children,
# so it will keep getting updated (and paper trading will still happen).
assert trade.value == 0
assert trade.capital == 0
assert trade.values[ dts[i] ] == 0.
# Since asset prices have moved, the paper trading price keeps updating.
# Note that if the flattening of the position were part of the definition
# of trade_c1_vs_c2, then the paper trading price would be fixed after
# flattening, as it would apply to both real and paper trading.
aae( trade.price, 102.)
aae( parent.universe[ trade.name ][ dts[i] ], 102. )
def test_outlays():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 1000 to strategy
s.adjust(1000)
# now let's see what happens when we allocate 500 to each child
c1.allocate(500)
c2.allocate(500)
# calling outlays should automatically update the strategy, since it is stale
assert c1.outlays[dts[0]] == (4 * 105)
assert c2.outlays[dts[0]] == (5 * 95)
assert c1.data['outlay'][dts[0]] == (4 * 105)
assert c2.data['outlay'][dts[0]] == (5 * 95)
i = 1
s.update(dts[i], data.loc[dts[i]])
c1.allocate(-400)
c2.allocate(100)
# outlays reflect the new trades without an explicit update
assert c1.outlays[dts[1]] == (-4 * 100)
assert c2.outlays[dts[1]] == 100
assert c1.data['outlay'][dts[1]] == (-4 * 100)
assert c2.data['outlay'][dts[1]] == 100
def test_child_weight_above_1():
# check for child weights not exceeding 1
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(np.random.randn(3, 2) + 100,
index=dts, columns=['c1', 'c2'])
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1e6)
s.allocate(1e6, 'c1')
c1 = s['c1']
assert c1.weight <= 1
def test_fixed_commissions():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
# fixed $1 commission per transaction
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 1000 to strategy
s.adjust(1000)
# now let's see what happens when we allocate 500 to each child
c1.allocate(500)
c2.allocate(500)
# update after the allocations
s.update(dts[i])
assert c1.value == 400
assert c2.value == 400
assert s.capital == 198
# de-alloc 100 from c1. This should force c1 to sell 2 units to raise at
# least 100 (because of commissions)
c1.allocate(-100)
s.update(dts[i])
assert c1.value == 200
assert s.capital == 198 + 199
# allocate 100 to c2. This should leave things unchanged, since c2 cannot
# buy one unit since the commission will cause total outlay to exceed
# allocation
c2.allocate(100)
s.update(dts[i])
assert c2.value == 400
assert s.capital == 198 + 199
# ok try again w/ 101 allocation. This time, it should work
c2.allocate(101)
s.update(dts[i])
assert c2.value == 500
assert s.capital == 198 + 199 - 101
# ok now let's close the whole position. Since we are closing, we expect
# the allocation to go through, even though the outlay > amount
c2.allocate(-500)
s.update(dts[i])
assert c2.value == 0
assert s.capital == 198 + 199 - 101 + 499
# now we are going to go short c2
# we want to 'raise' 100 dollars. Since we need a minimum of 100 and we
# also pay commissions, we will actually short 2 units in order to raise
# at least 100
c2.allocate(-100)
s.update(dts[i])
assert c2.value == -200
assert s.capital == 198 + 199 - 101 + 499 + 199
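# Sketch of the "sell/short enough to raise the cash" sizing the assertions
# above depend on: with a fixed fee, trading q units nets q * price - fee,
# so the smallest integer q satisfying q * price - fee >= amount is used
# (2 units in the examples above). Illustrative only.
def _min_units_to_raise(amount, price, fixed_fee):
    q = 0
    while q * price - fixed_fee < amount:
        q += 1
    return q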
def test_degenerate_shorting():
# a degenerate situation can arise where you short infinitely if the
# commission per share exceeds the share price
c1 = SecurityBase('c1')
s = StrategyBase('p', [c1])
# $1/share commission
s.set_commissions(lambda q, p: abs(q) * 1)
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
# c1 trades at 0.01
data = pd.DataFrame(index=dts, columns=['c1'], data=0.01)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
try:
c1.allocate(-10)
assert False
except Exception as e:
assert 'full_outlay should always be approaching amount' in str(e)
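# Sketch of why the allocation above must fail rather than loop forever:
# each additional share shorted nets price - commission_per_share of cash,
# and here that quantity is negative (0.01 - 1), so no finite number of
# shares can ever raise the requested amount.
def _net_cash_raised_per_share(price, commission_per_share):
    return price - commission_per_share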
def test_securitybase_allocate():
c1 = SecurityBase('c1')
s = StrategyBase('p', [c1])
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100.)
# set the price
data['c1'][dts[0]] = 91.40246706608193
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 100000 to strategy
original_capital = 100000.
s.adjust(original_capital)
# not integer positions
c1.integer_positions = False
# set the full_outlay and amount
full_outlay = 1999.693706988672
amount = 1999.6937069886717
c1.allocate(amount)
# the results that we want to be true
assert np.isclose(full_outlay, amount, rtol=0.)
# check that the quantity wasn't decreased and the full_outlay == amount
# we can get the full_outlay that was calculated by
# original capital - current capital
assert np.isclose(full_outlay, original_capital - s._capital, rtol=0.)
def test_securitybase_allocate_commisions():
date_span = pd.date_range(start='10/1/2017', end='10/11/2017', freq='B')
numper = len(date_span.values)
comms = 0.01
data = [[10, 15, 20, 25, 30, 35, 40, 45],
[10, 10, 10, 10, 20, 20, 20, 20],
[20, 20, 20, 30, 30, 30, 40, 40],
[20, 10, 20, 10, 20, 10, 20, 10]]
data = [[row[i] for row in data] for i in range(len(data[0]))] # Transpose
price = pd.DataFrame(data=data, index=date_span)
price.columns = ['a', 'b', 'c', 'd']
# price = price[['a', 'b']]
sig1 = pd.DataFrame(price['a'] >= price['b'] + 10, columns=['a'])
sig2 = pd.DataFrame(price['a'] < price['b'] + 10, columns=['b'])
signal = sig1.join(sig2)
signal1 = price.diff(1) > 0
signal2 = price.diff(1) < 0
tw = price.copy()
tw.loc[:, :] = 0  # Initialize: set everything to 0
tw[signal1] = -1.0
tw[signal2] = 1.0
s1 = bt.Strategy('long_short', [bt.algos.WeighTarget(tw),
bt.algos.RunDaily(),
bt.algos.Rebalance()])
# now we create the Backtest, with commissions=(lambda q, p: abs(p * q) * comms)
t = bt.Backtest(s1, price, initial_capital=1000000, commissions=(lambda q, p: abs(p * q) * comms), progress_bar=False)
# and let's run it!
res = bt.run(t)
def test_strategybase_tree_transact():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.transact(1)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.transact(5)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
# now transact the parent since weights are nonzero
s.transact(2)
assert c1.position == 6
assert c1.value == 600
assert s.capital == 1000 - 600
assert s.value == 1000
assert c1.weight == 600.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_transact_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.transact(1)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now transact in c1
s.transact(5, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_transact_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.transact(1)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now transact directly in child. No weights, so nothing happens
s1.transact(1)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now transact directly in child of child
s1.allocate(500)
c1.transact(2)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
# now transact directly in child again
s1.transact(5)
assert s1.value == 500
assert s1.capital == 500 - 400
assert c1.value == 400
assert c1.weight == 400.0 / 500
assert c1.position == 4
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_precision():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
c3 = SecurityBase('c3')
s = StrategyBase('p', [c1, c2, c3])
s.use_integer_positions(False)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=1.)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1.0)
s.rebalance(0.1, 'c1')
s.rebalance(0.1, 'c2')
s.rebalance(0.1, 'c3')
s.adjust(-0.7)
aae( s.capital, 0. )
aae( s.value, 0.3 )
aae( s.price, 100. )
assert s.capital != 0 # Due to numerical precision
assert s.value != 0.3 # Created non-zero value out of numerical precision errors
assert s.price != 100.
# Make sure we can still update and calculate return
i=1
s.update(dts[i])
aae( s.price, 100. )
aae( s.value, 0.3 )
assert s.price != 100.
assert s.value != 0.3
def test_securitybase_transact():
c1 = SecurityBase('c1')
s = StrategyBase('p', [c1])
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100.)
# set the price
price = 91.40246706608193
data['c1'][dts[0]] = 91.40246706608193
s.setup(data)
i = 0
s.update(dts[i])
# allocate 100000 to strategy
original_capital = 100000.
s.adjust(original_capital)
# not integer positions
c1.integer_positions = False
# set the transaction quantity and amount
q = 1000.
amount = q * price
c1.transact(q)
assert np.isclose( c1.value, amount, rtol=0.)
assert np.isclose( c1.weight, amount/original_capital, rtol=0.)
assert c1.position == q
assert np.isclose( c1.outlays[0], amount, rtol=0.)
assert np.isclose( s.capital, (original_capital - amount) )
assert s.weight == 1
assert s.value == original_capital
assert np.isclose( s.outlays[c1.name][0], amount, rtol=0.)
# Call again on the same step (and again) to make sure all updates are working
c1.transact(q)
c1.transact(q)
assert c1.position == 3*q
assert np.isclose( c1.outlays[0], 3*amount, rtol=0.)
assert np.isclose( c1.value, 3*amount, rtol=0.)
assert np.isclose( s.capital, (original_capital - 3*amount) )
assert s.weight == 1
assert s.value == original_capital
assert np.isclose( s.outlays[c1.name][0], 3*amount, rtol=0.)
def test_security_setup_positions():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
assert c1.position == 0
assert len(c1.positions) == 1
assert c1.positions[0] == 0
assert c2.position == 0
assert len(c2.positions) == 1
assert c2.positions[0] == 0
def test_couponpayingsecurity_setup():
c1 = CouponPayingSecurity('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
coupons = pd.DataFrame(index=dts, columns=['c1'], data=0.1)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
assert 'coupon' in c1.data
assert c1.coupon == 0.0
assert len(c1.coupons) == 1
assert c1.coupons[0] == 0.0
assert 'holding_cost' in c1.data
assert c1.holding_cost == 0.0
assert len(c1.holding_costs) == 1
assert c1.holding_costs[0] == 0.0
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_couponpayingsecurity_setup_costs():
c1 = CouponPayingSecurity('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
coupons = pd.DataFrame(index=dts, columns=['c1'], data=0.)
cost_long = pd.DataFrame(index=dts, columns=['c1'], data=0.01)
cost_short = pd.DataFrame(index=dts, columns=['c1'], data=0.05)
s.setup(data, coupons=coupons, cost_long=cost_long, cost_short=cost_short)
i = 0
s.update(dts[i])
assert 'coupon' in c1.data
assert c1.coupon == 0.0
assert len(c1.coupons) == 1
assert c1.coupons[0] == 0.0
assert 'holding_cost' in c1.data
assert c1.holding_cost == 0.0
assert len(c1.holding_costs) == 1
assert c1.holding_costs[0] == 0.0
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_couponpayingsecurity_carry():
c1 = CouponPayingSecurity('c1')
s = StrategyBase('p', [c1])
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=1.)
coupons = pd.DataFrame(index=dts, columns=['c1'], data=0.)
coupons['c1'][dts[0]] = 0.1
cost_long = pd.DataFrame(index=dts, columns=['c1'], data=0.)
cost_long['c1'][dts[0]] = 0.01
cost_short = pd.DataFrame(index=dts, columns=['c1'], data=0.05)
s.setup(data, coupons=coupons, cost_long=cost_long, cost_short=cost_short)
i = 0
s.update(dts[i])
# allocate 1000 to strategy
original_capital = 1000.
s.adjust(original_capital)
# set the transaction quantity
q = 1000.
c1.transact(q)
assert c1.coupon == 100.
assert len(c1.coupons) == 1
assert c1.coupons[0] == 100.
assert c1.holding_cost == 10.
assert len(c1.holding_costs) == 1
assert c1.holding_costs[0] == 10.
assert s.capital == 0.
assert s.cash[0] == 0.
# On this step, the coupon/costs will be accounted for from the last holding
i = 1
s.update(dts[i])
assert c1.coupon == 0.
assert len(c1.coupons) == 2
assert c1.coupons[1] == 0.
assert c1.holding_cost == 0.
assert len(c1.holding_costs) == 2
assert c1.holding_costs[1] == 0.
assert s.capital == 100. - 10.
assert s.cash[0] == 0.
assert s.cash[1] == 100. - 10.
# Go short q
c1.transact( -2*q )
# Note cost is positive even though we are short.
assert c1.holding_cost == 50.
assert len(c1.holding_costs) == 2
assert c1.holding_costs[1] == 50.
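# Sketch of the carry accounting implied by the assertions above: the coupon
# accrues on the signed position, the holding cost is charged on the absolute
# position using the long or short rate, and both are settled into strategy
# cash on the following update. Inferred from the test, not from library docs.
def _accrued_coupon(position, coupon_per_unit):
    return position * coupon_per_unit
def _holding_cost(position, cost_long_rate, cost_short_rate):
    rate = cost_long_rate if position >= 0 else cost_short_rate
    return abs(position) * rate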
def test_couponpayingsecurity_transact():
c1 = CouponPayingSecurity('c1')
s = StrategyBase('p', [c1])
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100.)
# set the price
price = 91.40246706608193
data['c1'][dts[0]] = 91.40246706608193
data['c1'][dts[1]] = 91.40246706608193
coupon = 0.1
coupons = pd.DataFrame(index=dts, columns=['c1'], data=0.)
coupons['c1'][dts[0]] = coupon
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
# allocate 100000 to strategy
original_capital = 100000.
s.adjust(original_capital)
# set the transaction quantity and amount
q = 1000.
amount = q * price
c1.transact(q)
# The coupon is nonzero, but will only be counted in "value" the next day
assert c1.coupon == coupon * q
assert len(c1.coupons) == 1
assert c1.coupons[0] == coupon * q
assert np.isclose( c1.value, amount, rtol=0.)
assert np.isclose( c1.weight, amount/original_capital, rtol=0.)
assert c1.position == q
assert s.capital == (original_capital - amount)
assert s.cash[0] == (original_capital - amount)
assert s.weight == 1
assert s.value == original_capital
assert c1._capital == coupon * q
# On this step, the coupon will be paid
i = 1
s.update(dts[i])
new_capital = original_capital + coupon * q
assert c1.coupon == 0
assert len(c1.coupons) == 2
assert c1.coupons[0] == coupon * q
assert c1.coupons[1] == 0
assert np.isclose( c1.value, amount, rtol=0.)
assert np.isclose( c1.weight, amount/new_capital, rtol=0.)
assert c1.position == q
assert s.capital == (new_capital - amount)
assert s.weight == 1
assert s.value == new_capital
assert s.cash[0] == (original_capital - amount)
assert s.cash[1] == (new_capital - amount)
assert c1._capital == 0
# Close the position
c1.transact(-q)
assert c1.coupon == 0
assert len(c1.coupons) == 2
assert c1.coupons[0] == coupon * q
assert c1.coupons[1] == 0
assert np.isclose( c1.value, 0., rtol=0.)
assert np.isclose( c1.weight, 0./new_capital, rtol=0.)
assert c1.position == 0
assert s.capital == new_capital
assert s.weight == 1
assert s.value == new_capital
assert s.cash[0] == (original_capital - amount)
assert s.cash[1] == new_capital
assert c1._capital == 0
def test_bidoffer():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
bidoffer = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=1.)
bidoffer['c1'][dts[0]] = 2
bidoffer['c2'][dts[0]] = 1.5
s.setup(data, bidoffer=bidoffer)
s.adjust(100000)
i = 0
s.update(dts[i])
assert c1.bidoffer == 2
assert len(c1.bidoffers) == 1
assert c1.bidoffers[0] == 2
assert c2.bidoffer == 1.5
assert len(c2.bidoffers) == 1
assert c2.bidoffers[0] == 1.5
# Check the outlays are adjusted for bid/offer
s.set_commissions( lambda q,p : 0.1 )
total, outlay, fee, bidoffer = c1.outlay( 100 )
assert bidoffer == 100 * 1
assert fee == 0.1
assert outlay == 100 * (105 + 1)
assert total == outlay + fee
total, outlay, fee, bidoffer = c1.outlay( -100 )
assert bidoffer == 100 * 1
assert fee == 0.1
assert outlay == -100 * (105 - 1)
assert total == outlay + fee
total, outlay, fee, bidoffer = c2.outlay( 100 )
assert bidoffer == 100 * 0.75
assert fee == 0.1
assert outlay == 100 * (95 + 0.75)
assert total == outlay + fee
total, outlay, fee, bidoffer = c2.outlay( -100 )
assert bidoffer == 100 * 0.75
assert fee == 0.1
assert outlay == -100 * (95 - 0.75)
assert total == outlay + fee
# Do some transactions, and check that bidoffer_paid is updated
c1.transact(100)
assert c1.bidoffer_paid == 100 * 1
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
c1.transact(100)
assert c1.bidoffer_paid == 200 * 1
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
c2.transact(-100)
assert c2.bidoffer_paid == 100 * 0.75
assert c2.bidoffers_paid[i] == c2.bidoffer_paid
assert s.bidoffer_paid == 100 * 0.75 + 200 * 1
assert s.bidoffers_paid[i] == s.bidoffer_paid
assert s.fees.iloc[i] == 3 * 0.1
i = 1
s.update(dts[i])
assert c1.bidoffer_paid == 0.
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
assert c2.bidoffer_paid == 0.
assert c2.bidoffers_paid[i] == c2.bidoffer_paid
assert s.bidoffer_paid == 0.
assert s.bidoffers_paid[i] == s.bidoffer_paid
assert s.fees[i] == 0.
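# Sketch of the fill arithmetic asserted above (assuming, as the numbers
# suggest, that the quoted bidoffer is the full spread): buys are filled at
# mid + spread / 2, sells at mid - spread / 2, and bidoffer_paid accumulates
# abs(quantity) * spread / 2.
def _fill_price(mid, spread, quantity):
    half = spread / 2.0
    return mid + half if quantity > 0 else mid - half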
def test_outlay_custom():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
s.setup(data)
s.adjust(100000)
i = 0
s.update(dts[i])
# Check the outlays are adjusted for custom prices
s.set_commissions( lambda q,p : 0.1*p )
total, outlay, fee, bidoffer = c1.outlay( 100, 106 )
assert bidoffer == 100 * 1
assert fee == 0.1 * 106
assert outlay == 100 * (106)
assert total == outlay + fee
total, outlay, fee, bidoffer = c1.outlay( -100, 106 )
assert bidoffer == -100 * 1
assert fee == 0.1 * 106
assert outlay == -100 * 106
assert total == outlay + fee
def test_bidoffer_custom():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
# Note: In order to access bidoffer_paid,
# need to pass bidoffer kwarg during setup
s.setup(data, bidoffer = {})
s.adjust(100000)
i = 0
s.update(dts[i])
c1.transact(100, price=106)
assert c1.bidoffer_paid == 100 * 1
assert s.bidoffer_paid == c1.bidoffer_paid
assert s.capital == 100000 - 100*106
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
assert s.bidoffers_paid[i] == s.bidoffer_paid
c1.transact(100, price=106)
assert c1.bidoffer_paid == 200 * 1
assert s.bidoffer_paid == c1.bidoffer_paid
assert s.capital == 100000 - 100*106 - 100*106
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
assert s.bidoffers_paid[i] == s.bidoffer_paid
c1.transact(-100, price=107)
assert c1.bidoffer_paid == 0
assert s.bidoffer_paid == c1.bidoffer_paid
assert s.capital == 100000 - 100*106 - 100*106 + 100*107
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
assert s.bidoffers_paid[i] == s.bidoffer_paid
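# Sketch of the bidoffer_paid bookkeeping when an explicit execution price is
# passed to transact(): the slippage booked is the traded quantity times the
# difference between the execution price and the reference (mid) price, which
# is why the favourable sale at 107 above nets the earlier cost back to zero.
def _bidoffer_paid(quantity, exec_price, mid_price):
    return quantity * (exec_price - mid_price)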
def test_security_notional_value():
c1 = SecurityBase('c1')
c2 = CouponPayingSecurity('c2')
c3 = HedgeSecurity('c3')
c4 = CouponPayingHedgeSecurity('c4')
c5 = FixedIncomeSecurity('c5')
s = StrategyBase('p', children = [c1, c2, c3, c4, c5])
c1 = s['c1']; c2 = s['c2']; c3 = s['c3']; c4 = s['c4']; c5 = s['c5']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
c1.transact(1000)
c2.transact(1000)
c3.transact(1000)
c4.transact(1000)
c5.transact(1000)
for c in [ c1, c2, c3, c4, c5 ]:
assert c.position == 1000
assert c.price == 100
assert c1.notional_value == 1000*100.
assert c2.notional_value == 1000
assert c3.notional_value == 0
assert c4.notional_value == 0
assert c5.notional_value == 1000
for c in [ c1, c2, c3, c4, c5 ]:
assert len( c.notional_values ) == 1
assert c.notional_values[ dts[i] ] == c.notional_value
assert s.notional_value == 2000 + 1000*100 # Strategy notional value always positive
i = 1
s.update(dts[i])
c1.transact(-3000)
c2.transact(-3000)
c3.transact(-3000)
c4.transact(-3000)
c5.transact(-3000)
for c in [ c1, c2, c3, c4, c5 ]:
assert c.position == -2000
assert c.price == 100
assert c1.notional_value == -2000*100.
assert c2.notional_value == -2000
assert c3.notional_value == 0
assert c4.notional_value == 0
assert c5.notional_value == -2000
for c in [ c1, c2, c3, c4, c5 ]:
assert len( c.notional_values ) == 2
assert c.notional_values[ dts[i] ] == c.notional_value
assert s.notional_value == 2000*100 + 4000 # Strategy notional value always positive
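# Sketch of the notional conventions implied by the assertions above (not
# taken from library docs): plain securities report position * price,
# coupon-paying and fixed income securities report the position itself,
# hedge securities report zero, and the strategy aggregates absolute values
# so its own notional is always non-negative.
def _strategy_notional(child_notionals):
    return sum(abs(n) for n in child_notionals)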
# FixedIncomeStrategy Tests
def test_fi_strategy_flag():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', children = [s1, s2])
assert s.fixed_income == False
s = FixedIncomeStrategy('p', [s1, s2])
assert s.fixed_income == True
def test_fi_strategy_no_bankruptcy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.transact( 10, 'c2')
assert s.value == 0.
assert s.capital == -10*100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert s.value == -5*10
assert s.capital == -10*100
assert s.bankrupt == False
def test_fi_strategy_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
# Basic setup works with no adjustment
assert s.value == 0
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
assert c1.notional_value == 0
assert c2.notional_value == 0
# Positive or negative capital adjustments are fine
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
s.adjust(-2000)
assert s.capital == -1000
assert s.value == -1000
def test_fi_strategy_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children = [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = -5 # Test negative prices
data['c2'][dts[2]] = 0 # Test zero price
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == -5
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 0
def test_fi_strategy_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children = [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.notional_value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.notional_value == 500 # Capital does not count towards notional
assert c1.weight == 1.
assert c2.weight == 0
def test_fi_strategy_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children = [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 1.0
assert c2.weight == 0
def test_fi_strategy_close():
c1 = SecurityBase('c1')
c2 = CouponPayingSecurity('c2')
c3 = HedgeSecurity('c3')
c4 = CouponPayingHedgeSecurity('c4')
s = FixedIncomeStrategy('p', children = [c1, c2, c3, c4])
c1 = s['c1']; c2 = s['c2']; c3 = s['c3']; c4 = s['c4']
dts = pd.date_range('2010-01-01', periods=3)
# Price
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
for c in [ c1, c2, c3, c4 ]:
s.transact(10, c.name)
assert c.position == 10
assert c.value == 1000
assert s.capital == -1000
assert s.value == 0
s.close( c.name )
assert c.position == 0
assert c.value == 0
assert s.capital == 0
assert s.value == 0
s.transact(-10, c.name)
assert c.position == -10
assert c.value == -1000
assert s.capital == 1000
assert s.value == 0
s.close( c.name )
assert c.position == 0
assert c.value == 0
assert s.capital == 0
assert s.value == 0
def test_fi_strategy_close_zero_price():
c1 = SecurityBase('c1')
c2 = CouponPayingSecurity('c2')
c3 = HedgeSecurity('c3')
c4 = CouponPayingHedgeSecurity('c4')
s = FixedIncomeStrategy('p', children = [c1, c2, c3, c4])
c1 = s['c1']; c2 = s['c2']; c3 = s['c3']; c4 = s['c4']
dts = pd.date_range('2010-01-01', periods=3)
# Zero prices are OK in fixed income space (e.g. swaps)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4'], data=0.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
for c in [ c1, c2, c3, c4 ]:
s.transact(10, c.name)
assert c.position == 10
assert c.value == 0
s.close( c.name )
assert c.position == 0
assert c.value == 0
s.transact(-10, c.name)
assert c.position == -10
assert c.value == 0
s.close( c.name )
assert c.position == 0
assert c.value == 0
def test_fi_strategy_flatten():
c1 = SecurityBase('c1')
c2 = CouponPayingSecurity('c2')
c3 = HedgeSecurity('c3')
c4 = CouponPayingHedgeSecurity('c4')
s = FixedIncomeStrategy('p', children = [c1, c2, c3, c4])
c1 = s['c1']; c2 = s['c2']; c3 = s['c3']; c4 = s['c4']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
for c in [ c1, c2, c3, c4 ]:
s.transact(10, c.name)
for c in [ c1, c2, c3, c4 ]:
assert c.position == 10
assert c.value == 1000
s.flatten()
for c in [ c1, c2, c3, c4 ]:
assert c.position == 0
assert c.value == 0
def test_fi_strategy_prices():
c1 = CouponPayingSecurity('c1')
s = FixedIncomeStrategy('s', children = [c1] )
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=4)
rawd = [2, -3, 0, 1]
data = pd.DataFrame(index=dts, data=rawd, columns=['c1'])
coupons = pd.DataFrame(index=dts, columns=['c1'], data=[1,2,3,4])
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
s.transact( 10, 'c1')
assert c1.coupon == 10*1
assert s.capital == -10*2
assert s.value == 0
assert len(s.children) == 1
assert s.price == 100
assert s.notional_value == 10
last_coupon = c1.coupon
last_value = s.value
last_notional_value = s.notional_value
last_price = 100.
i=1
s.update(dts[i])
cpn = last_coupon
assert c1.coupon == 10*2
assert s.capital == -10*2 + cpn
assert s.value == -5*10 + cpn # MTM + coupon
assert s.notional_value == 10
assert s.price == last_price + 100 * (s.value-last_value)/last_notional_value
last_value = s.value
last_notional_value = s.notional_value
last_price = s.price
last_coupon = c1.coupon
i=2
s.update(dts[i])
cpn += last_coupon
assert c1.coupon == 10*3
assert s.capital == -10*2 + cpn
assert s.value == -2*10 + cpn # MTM + coupon
assert s.notional_value == 10
assert s.price == last_price + 100 * (s.value - last_value)/last_notional_value
last_value = s.value
last_notional_value = s.notional_value
last_price = s.price
last_coupon = c1.coupon
i=3
s.update(dts[i])
s.transact( 10, 'c1')
# Coupon still from previous period - not affected by new transaction
cpn += last_coupon
assert c1.coupon == 20*4
assert s.capital == -10*2 -10*1 + cpn
assert s.value == -1*10 + 0 + cpn # MTM + coupon
assert s.notional_value == 20
assert s.price == last_price + 100 * (s.value - last_value)/last_notional_value
def test_fi_fail_if_0_base_in_return_calc():
c1 = HedgeSecurity('c1')
s = FixedIncomeStrategy('s', children = [c1] )
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=4)
rawd = [2, -3, 0, 1]
data = pd.DataFrame(index=dts, data=rawd, columns=['c1'])
s.setup(data)
i=0
s.update(dts[i])
assert s.notional_value == 0
# Hedge security has no notional value, so strategy doesn't either
# and thus shouldn't be making PNL.
i = 1
try:
s.update(dts[i])
except ZeroDivisionError as e:
if 'Could not update' not in str(e):
assert False
def test_fi_strategy_tree_rebalance():
c1 = SecurityBase('c1')
c2 = CouponPayingSecurity('c2')
c3 = HedgeSecurity('c3')
c4 = CouponPayingHedgeSecurity('c4')
s = FixedIncomeStrategy('p', children = [c1, c2, c3, c4])
c1 = s['c1']; c2 = s['c2']; c3 = s['c3']; c4 = s['c4']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4'], data=50.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert s.value == 0
assert s.capital == 0
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1', base = 1000)
assert c1.position == 10
assert c1.value == 500
assert c1.notional_value == 500
assert s.capital == -500
assert s.value == 0
assert s.notional_value == 500
assert c1.weight == 1.0
assert c2.weight == 0
assert c2.notional_value == 0
# Now rebalance to s2, with no base weight.
# It takes base weight from strategy weight (500)
s.rebalance(0.5, 'c2')
assert c1.position == 10
assert c1.notional_value == 500
assert c2.position == 250
assert c2.notional_value == 250
assert s.notional_value == c1.notional_value + c2.notional_value
assert c1.weight == 2./3.
assert c2.weight == 1./3.
assert s.value == 0
i = 1
s.update(dts[i], data.loc[dts[i]])
# Now rebalance to a new, higher base with given target weights (including negative)
s.rebalance(0.5, 'c1', 1000, update=False)
s.rebalance(-0.5, 'c2', 1000)
assert c1.weight == 0.5
assert c2.weight == -0.5
assert c1.position == 10
assert c1.notional_value == 500
assert c2.position == -500
assert c2.notional_value == -500
def test_fi_strategy_tree_rebalance_nested():
c1 = CouponPayingSecurity('c1')
c2 = CouponPayingSecurity('c2')
s1 = FixedIncomeStrategy('s1', children = [c1, c2])
s2 = FixedIncomeStrategy('s2', children = [c1, c2])
s = FixedIncomeStrategy('s', children = [s1, s2])
p = FixedIncomeStrategy('p', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=50.)
coupons = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=0.)
s.setup(data, coupons = coupons)
p.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
p.update(dts[i])
s['s1'].transact( 100, 'c1')
s['s2'].transact( 100, 'c2')
p.transact( 100, 'c1')
p.transact( 100, 'c2')
assert s['s1']['c1'].position == 100
assert s['s2']['c2'].position == 100
assert p['c1'].position == 100
assert p['c2'].position == 100
s.update(dts[i]) # Force update to be safe
base = s.notional_value
s.rebalance(0.5, 's1', base*10, update=False)
s.rebalance(-0.5, 's2', base*10)
p.rebalance(5, 'c1', update=False )
p.rebalance(-5, 'c2' )
s.update(dts[i]) # Force update to be safe
assert s['s1']['c1'].position == 1000
assert s['s2']['c2'].position == -1000
assert s['s1']['c1'].weight == 1.
assert s['s2']['c2'].weight == -1
assert p['c1'].position == 1000
assert p['c2'].position == -1000
    # Note that even though the security weights are signed,
    # the strategy weights are all positive (and hence not equal
    # to the weight passed in to the rebalance call)
assert s['s1'].weight == 0.5
assert s['s2'].weight == 0.5
assert s.value == 0.
assert p.value == 0.
assert s.capital == 0
assert p.capital == 0
def test_fi_strategy_precision():
N = 100
children = [ SecurityBase('c%i' %i ) for i in range(N) ]
s = FixedIncomeStrategy('p', children = children)
children = [ s[ c.name ] for c in children ]
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=[c.name for c in children], data=1.)
s.setup(data)
i = 0
s.update(dts[i])
for c in children:
c.transact(0.1)
# Even within tolerance, value is nonzero
aae( s.value, 0, 14)
assert not is_zero( s.value )
# Notional value not quite equal to N * 0.1
assert s.notional_value == sum( 0.1 for _ in range(N) )
assert s.notional_value != N*0.1
assert s.price == 100.
old_value = s.value
old_notional_value = s.notional_value
# Still make sure we can update - PNL nonzero, and last notional value is zero
i = 1
s.update(dts[i])
assert s.price == 100.
# Even within tolerance, value is nonzero
assert s.value == old_value
assert s.notional_value == old_notional_value
# The weights also have numerical precision issues
aae( children[0].weight, 1/float(N), 16)
assert children[0].weight != 1/float(N)
# Now rebalance "out" of an asset with the almost zero weight
new_weight = children[0].weight - 1/float(N)
s.rebalance( new_weight, children[0].name )
# Check that the position is still closed completely
assert children[0].position == 0
def test_fi_strategy_bidoffer():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children=[c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
bidoffer = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=1.)
bidoffer['c1'][dts[0]] = 2
bidoffer['c2'][dts[0]] = 1.5
s.setup(data, bidoffer=bidoffer)
i = 0
s.update(dts[i])
assert s.value == 0.
assert s.price == 100.
# Do some transactions, and check that bidoffer_paid is updated
c1.transact(100)
assert c1.bidoffer_paid == 100 * 1
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
c1.transact(100)
assert c1.bidoffer_paid == 200 * 1
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
c2.transact(-100)
assert c2.bidoffer_paid == 100 * 0.75
assert c2.bidoffers_paid[i] == c2.bidoffer_paid
s.update(dts[i])
assert s.bidoffer_paid == 275.
assert s.bidoffers_paid[i] == s.bidoffer_paid
assert s.value == -275.
assert s.notional_value == 105*200 + 95*100
assert s.price == 100 * (1. - 275. / (105*200 + 95*100))
old_notional = s.notional_value
old_value = s.value
old_price = s.price
i=1
s.update(dts[i])
assert s.bidoffer_paid == 0.
assert s.bidoffers_paid[i] == s.bidoffer_paid
assert s.value == -275. - 200*5 - 100*5 # Bid-offer paid
assert s.notional_value == 100*200 + 100*100
new_value = s.value
assert s.price == old_price + 100 * ( new_value-old_value) / old_notional
| mit |
KallyopeBio/StarCluster | utils/scimage_12_04.py | 20 | 17216 | #!/usr/bin/env python
"""
This script is meant to be run inside of an Ubuntu cloud image available at
uec-images.ubuntu.com::
$ EC2_UBUNTU_IMG_URL=http://uec-images.ubuntu.com/precise/current
$ wget $EC2_UBUNTU_IMG_URL/precise-server-cloudimg-amd64.tar.gz
or::
$ wget $EC2_UBUNTU_IMG_URL/precise-server-cloudimg-i386.tar.gz
After downloading an Ubuntu cloud image, the next step is to extract the image::
$ tar xvzf precise-server-cloudimg-amd64.tar.gz
Then resize it to 10GB::
$ e2fsck -f precise-server-cloudimg-amd64.img
$ resize2fs precise-server-cloudimg-amd64.img 10G
Next you need to mount the image::
$ mkdir /tmp/img-mount
$ mount precise-server-cloudimg-amd64.img /tmp/img-mount
$ mount -t proc none /tmp/img-mount/proc
$ mount -t sysfs none /tmp/img-mount/sys
$ mount -o bind /dev /tmp/img-mount/dev
$ mount -t devpts none /tmp/img-mount/dev/pts
$ mount -o rbind /var/run/dbus /tmp/img-mount/var/run/dbus
Copy /etc/resolv.conf and /etc/mtab to the image::
$ mkdir -p /tmp/img-mount/var/run/resolvconf
$ cp /etc/resolv.conf /tmp/img-mount/var/run/resolvconf/resolv.conf
$ grep -v rootfs /etc/mtab > /tmp/img-mount/etc/mtab
Next copy this script inside the image::
$ cp /path/to/scimage.py /tmp/img-mount/root/scimage.py
Finally chroot inside the image and run this script:
$ chroot /tmp/img-mount /bin/bash
$ cd $HOME
$ python scimage.py
"""
import os
import sys
import glob
import shutil
import fileinput
import subprocess
import multiprocessing
SRC_DIR = "/usr/local/src"
APT_SOURCES_FILE = "/etc/apt/sources.list"
BUILD_UTILS_PKGS = "build-essential devscripts debconf debconf-utils dpkg-dev "
BUILD_UTILS_PKGS += "gfortran llvm-3.2-dev swig cdbs patch python-dev "
BUILD_UTILS_PKGS += "python-distutils-extra python-setuptools python-pip "
BUILD_UTILS_PKGS += "python-nose"
CLOUD_CFG_FILE = '/etc/cloud/cloud.cfg'
GRID_SCHEDULER_GIT = 'git://github.com/jtriley/gridscheduler.git'
CLOUDERA_ARCHIVE_KEY = 'http://archive.cloudera.com/debian/archive.key'
CLOUDERA_APT = 'http://archive.cloudera.com/debian maverick-cdh3u5 contrib'
CONDOR_APT = 'http://www.cs.wisc.edu/condor/debian/development lenny contrib'
NUMPY_SCIPY_SITE_CFG = """\
[DEFAULT]
library_dirs = /usr/lib
include_dirs = /usr/include:/usr/include/suitesparse
[blas_opt]
libraries = ptf77blas, ptcblas, atlas
[lapack_opt]
libraries = lapack, ptf77blas, ptcblas, atlas
[amd]
amd_libs = amd
[umfpack]
umfpack_libs = umfpack
[fftw]
libraries = fftw3
"""
STARCLUSTER_MOTD = """\
#!/bin/sh
cat<<"EOF"
_ _ _
__/\_____| |_ __ _ _ __ ___| |_ _ ___| |_ ___ _ __
\ / __| __/ _` | '__/ __| | | | / __| __/ _ \ '__|
/_ _\__ \ || (_| | | | (__| | |_| \__ \ || __/ |
\/ |___/\__\__,_|_| \___|_|\__,_|___/\__\___|_|
StarCluster Ubuntu 12.04 AMI
Software Tools for Academics and Researchers (STAR)
Homepage: http://star.mit.edu/cluster
Documentation: http://star.mit.edu/cluster/docs/latest
Code: https://github.com/jtriley/StarCluster
Mailing list: [email protected]
This AMI Contains:
* Open Grid Scheduler (OGS - formerly SGE) queuing system
* Condor workload management system
* OpenMPI compiled with Open Grid Scheduler support
* OpenBLAS - Highly optimized Basic Linear Algebra Routines
* NumPy/SciPy linked against OpenBlas
* IPython 0.13 with parallel and notebook support
* and more! (use 'dpkg -l' to show all installed packages)
Open Grid Scheduler/Condor cheat sheet:
* qstat/condor_q - show status of batch jobs
* qhost/condor_status- show status of hosts, queues, and jobs
* qsub/condor_submit - submit batch jobs (e.g. qsub -cwd ./job.sh)
* qdel/condor_rm - delete batch jobs (e.g. qdel 7)
* qconf - configure Open Grid Scheduler system
Current System Stats:
EOF
landscape-sysinfo | grep -iv 'graph this data'
"""
CLOUD_INIT_CFG = """\
user: ubuntu
disable_root: 0
preserve_hostname: False
# datasource_list: [ "NoCloud", "OVF", "Ec2" ]
cloud_init_modules:
- bootcmd
- resizefs
- set_hostname
- update_hostname
- update_etc_hosts
- rsyslog
- ssh
cloud_config_modules:
- mounts
- ssh-import-id
- locale
- set-passwords
- grub-dpkg
- timezone
- puppet
- chef
- mcollective
- disable-ec2-metadata
- runcmd
cloud_final_modules:
- rightscale_userdata
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- keys-to-console
- final-message
apt_sources:
- source: deb $MIRROR $RELEASE multiverse
- source: deb %(CLOUDERA_APT)s
- source: deb-src %(CLOUDERA_APT)s
- source: deb %(CONDOR_APT)s
""" % dict(CLOUDERA_APT=CLOUDERA_APT, CONDOR_APT=CONDOR_APT)
def run_command(cmd, ignore_failure=False, failure_callback=None,
get_output=False):
kwargs = {}
if get_output:
kwargs.update(dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE))
p = subprocess.Popen(cmd, shell=True, **kwargs)
output = []
if get_output:
line = None
while line != '':
line = p.stdout.readline()
if line != '':
output.append(line)
print line,
for line in p.stderr.readlines():
if line != '':
output.append(line)
print line,
retval = p.wait()
if retval != 0:
errmsg = "command '%s' failed with status %d" % (cmd, retval)
if failure_callback:
ignore_failure = failure_callback(retval)
if not ignore_failure:
raise Exception(errmsg)
else:
sys.stderr.write(errmsg + '\n')
if get_output:
return retval, ''.join(output)
return retval
def apt_command(cmd):
dpkg_opts = "Dpkg::Options::='--force-confnew'"
cmd = "apt-get -o %s -y --force-yes %s" % (dpkg_opts, cmd)
cmd = "DEBIAN_FRONTEND='noninteractive' " + cmd
run_command(cmd)
def apt_install(pkgs):
apt_command('install %s' % pkgs)
def chdir(directory):
opts = glob.glob(directory)
isdirlist = [o for o in opts if os.path.isdir(o)]
if len(isdirlist) > 1:
raise Exception("more than one dir matches: %s" % directory)
os.chdir(isdirlist[0])
def _fix_atlas_rules(rules_file='debian/rules'):
for line in fileinput.input(rules_file, inplace=1):
if 'ATLAS=None' not in line:
print line,
def configure_apt_sources():
srcfile = open(APT_SOURCES_FILE)
contents = srcfile.readlines()
srcfile.close()
srclines = []
for line in contents:
if not line.strip() or line.startswith('#'):
continue
parts = line.split()
if parts[0] == 'deb':
parts[0] = 'deb-src'
srclines.append(' '.join(parts).strip())
srcfile = open(APT_SOURCES_FILE, 'w')
srcfile.write(''.join(contents))
srcfile.write('\n'.join(srclines) + '\n')
srcfile.write('deb %s\n' % CLOUDERA_APT)
srcfile.write('deb-src %s\n' % CLOUDERA_APT)
srcfile.write('deb %s\n' % CONDOR_APT)
srcfile.close()
run_command('add-apt-repository ppa:staticfloat/julia-deps -y')
run_command('gpg --keyserver keyserver.ubuntu.com --recv-keys 0F932C9C')
run_command('curl -s %s | sudo apt-key add -' % CLOUDERA_ARCHIVE_KEY)
apt_install('debian-archive-keyring')
def upgrade_packages():
apt_command('update')
apt_command('upgrade')
def install_build_utils():
"""docstring for configure_build"""
apt_install(BUILD_UTILS_PKGS)
def install_gridscheduler():
chdir(SRC_DIR)
apt_command('build-dep gridengine')
if os.path.isfile('gridscheduler-scbuild.tar.gz'):
run_command('tar xvzf gridscheduler-scbuild.tar.gz')
run_command('mv gridscheduler /opt/sge6-fresh')
return
run_command('git clone %s' % GRID_SCHEDULER_GIT)
sts, out = run_command('readlink -f `which java`', get_output=True)
java_home = out.strip().split('/jre')[0]
chdir(os.path.join(SRC_DIR, 'gridscheduler', 'source'))
run_command('git checkout -t -b develop origin/develop')
env = 'JAVA_HOME=%s' % java_home
run_command('%s ./aimk -only-depend' % env)
run_command('%s scripts/zerodepend' % env)
run_command('%s ./aimk depend' % env)
run_command('%s ./aimk -no-secure -no-gui-inst' % env)
sge_root = '/opt/sge6-fresh'
os.mkdir(sge_root)
env += ' SGE_ROOT=%s' % sge_root
run_command('%s scripts/distinst -all -local -noexit -y -- man' % env)
def install_condor():
chdir(SRC_DIR)
run_command("rm /var/lock")
apt_install('condor=7.7.2-1')
run_command('echo condor hold | dpkg --set-selections')
run_command('ln -s /etc/condor/condor_config /etc/condor_config.local')
run_command('mkdir /var/lib/condor/log')
run_command('mkdir /var/lib/condor/run')
run_command('chown -R condor:condor /var/lib/condor/log')
run_command('chown -R condor:condor /var/lib/condor/run')
def install_torque():
chdir(SRC_DIR)
apt_install('torque-server torque-mom torque-client')
def install_pydrmaa():
chdir(SRC_DIR)
run_command('pip install drmaa')
def install_blas_lapack():
"""docstring for install_openblas"""
chdir(SRC_DIR)
apt_install("libopenblas-dev")
def install_numpy_scipy():
"""docstring for install_numpy"""
chdir(SRC_DIR)
run_command('pip install -d . numpy')
run_command('unzip numpy*.zip')
run_command("sed -i 's/return None #/pass #/' numpy*/numpy/core/setup.py")
run_command('pip install scipy')
def install_pandas():
"""docstring for install_pandas"""
chdir(SRC_DIR)
apt_command('build-dep pandas')
run_command('pip install pandas')
def install_matplotlib():
chdir(SRC_DIR)
run_command('pip install matplotlib')
def install_julia():
apt_install("libsuitesparse-dev libncurses5-dev "
"libopenblas-dev libarpack2-dev libfftw3-dev libgmp-dev "
"libunwind7-dev libreadline-dev zlib1g-dev")
buildopts = """\
BUILDOPTS="LLVM_CONFIG=llvm-config-3.2 USE_QUIET=0 USE_LIB64=0"; for lib in \
LLVM ZLIB SUITESPARSE ARPACK BLAS FFTW LAPACK GMP LIBUNWIND READLINE GLPK \
NGINX; do export BUILDOPTS="$BUILDOPTS USE_SYSTEM_$lib=1"; done"""
chdir(SRC_DIR)
if not os.path.exists("julia"):
run_command("git clone git://github.com/JuliaLang/julia.git")
run_command("%s && cd julia && make $BUILDOPTS PREFIX=/usr install" %
buildopts)
def install_mpi():
chdir(SRC_DIR)
apt_install('mpich2')
apt_command('build-dep openmpi')
apt_install('blcr-util')
if glob.glob('*openmpi*.deb'):
run_command('dpkg -i *openmpi*.deb')
else:
apt_command('source openmpi')
chdir('openmpi*')
for line in fileinput.input('debian/rules', inplace=1):
print line,
if '--enable-heterogeneous' in line:
print ' --with-sge \\'
def _deb_failure_callback(retval):
if not glob.glob('../*openmpi*.deb'):
return False
return True
run_command('dch --local=\'+custom\' '
'"custom build on: `uname -s -r -v -m -p -i -o`"')
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*openmpi*.deb')
sts, out = run_command('ompi_info | grep -i grid', get_output=True)
if 'gridengine' not in out:
raise Exception("failed to build OpenMPI with "
"Open Grid Scheduler support")
run_command('echo libopenmpi1.3 hold | dpkg --set-selections')
run_command('echo libopenmpi-dev hold | dpkg --set-selections')
run_command('echo libopenmpi-dbg hold | dpkg --set-selections')
run_command('echo openmpi-bin hold | dpkg --set-selections')
run_command('echo openmpi-checkpoint hold | dpkg --set-selections')
run_command('echo openmpi-common hold | dpkg --set-selections')
run_command('echo openmpi-doc hold | dpkg --set-selections')
run_command('pip install mpi4py')
def install_hadoop():
chdir(SRC_DIR)
hadoop_pkgs = ['namenode', 'datanode', 'tasktracker', 'jobtracker',
'secondarynamenode']
pkgs = ['hadoop-0.20'] + ['hadoop-0.20-%s' % pkg for pkg in hadoop_pkgs]
apt_install(' '.join(pkgs))
run_command('easy_install dumbo')
def install_ipython():
chdir(SRC_DIR)
apt_install('libzmq-dev')
run_command('pip install ipython tornado pygments pyzmq')
mjax_install = 'from IPython.external.mathjax import install_mathjax'
mjax_install += '; install_mathjax()'
run_command("python -c '%s'" % mjax_install)
def configure_motd():
for f in glob.glob('/etc/update-motd.d/*'):
os.unlink(f)
motd = open('/etc/update-motd.d/00-starcluster', 'w')
motd.write(STARCLUSTER_MOTD)
motd.close()
os.chmod(motd.name, 0755)
def configure_cloud_init():
"""docstring for configure_cloud_init"""
cloudcfg = open('/etc/cloud/cloud.cfg', 'w')
cloudcfg.write(CLOUD_INIT_CFG)
cloudcfg.close()
def configure_bash():
completion_line_found = False
for line in fileinput.input('/etc/bash.bashrc', inplace=1):
if 'bash_completion' in line and line.startswith('#'):
print line.replace('#', ''),
completion_line_found = True
elif completion_line_found:
print line.replace('#', ''),
completion_line_found = False
else:
print line,
aliasfile = open('/root/.bash_aliases', 'w')
aliasfile.write("alias ..='cd ..'\n")
aliasfile.close()
def setup_environ():
num_cpus = multiprocessing.cpu_count()
os.environ['MAKEFLAGS'] = '-j%d' % (num_cpus + 1)
os.environ['DEBIAN_FRONTEND'] = "noninteractive"
if os.path.isfile('/sbin/initctl') and not os.path.islink('/sbin/initctl'):
run_command('mv /sbin/initctl /sbin/initctl.bak')
run_command('ln -s /bin/true /sbin/initctl')
def install_nfs():
chdir(SRC_DIR)
run_command('initctl reload-configuration')
apt_install('nfs-kernel-server')
run_command('ln -s /etc/init.d/nfs-kernel-server /etc/init.d/nfs')
def install_default_packages():
    # stop mysql from interactively asking for a password
preseedf = '/tmp/mysql-preseed.txt'
mysqlpreseed = open(preseedf, 'w')
preseeds = """\
mysql-server mysql-server/root_password select
mysql-server mysql-server/root_password seen true
mysql-server mysql-server/root_password_again select
mysql-server mysql-server/root_password_again seen true
"""
mysqlpreseed.write(preseeds)
mysqlpreseed.close()
run_command('debconf-set-selections < %s' % mysqlpreseed.name)
run_command('rm %s' % mysqlpreseed.name)
pkgs = ["git", "mercurial", "subversion", "cvs", "vim", "vim-scripts",
"emacs", "tmux", "screen", "zsh", "ksh", "csh", "tcsh", "encfs",
"keychain", "unzip", "rar", "unace", "ec2-api-tools",
"ec2-ami-tools", "mysql-server", "mysql-client", "apache2",
"libapache2-mod-wsgi", "sysv-rc-conf", "pssh", "cython", "irssi",
"htop", "mosh", "default-jdk", "xvfb", "python-imaging",
"python-ctypes"]
apt_install(' '.join(pkgs))
def install_python_packges():
pypkgs = ['python-boto', 'python-paramiko', 'python-django',
'python-pudb']
for pypkg in pypkgs:
if pypkg.startswith('python-'):
apt_command('build-dep %s' % pypkg.split('python-')[1])
            run_command('pip install %s' % pypkg)
def configure_init():
for script in ['nfs-kernel-server', 'hadoop', 'condor', 'apache', 'mysql']:
run_command('find /etc/rc* -iname \*%s\* -delete' % script)
def cleanup():
run_command('rm -f /etc/resolv.conf')
run_command('rm -rf /var/run/resolvconf')
run_command('rm -f /etc/mtab')
run_command('rm -rf /root/*')
exclude = ['/root/.bashrc', '/root/.profile', '/root/.bash_aliases']
for dot in glob.glob("/root/.*"):
if dot not in exclude:
run_command('rm -rf %s' % dot)
for path in glob.glob('/usr/local/src/*'):
if os.path.isdir(path):
shutil.rmtree(path)
run_command('rm -f /var/cache/apt/archives/*.deb')
run_command('rm -f /var/cache/apt/archives/partial/*')
for f in glob.glob('/etc/profile.d'):
if 'byobu' in f:
run_command('rm -f %s' % f)
if os.path.islink('/sbin/initctl') and os.path.isfile('/sbin/initctl.bak'):
run_command('mv -f /sbin/initctl.bak /sbin/initctl')
def main():
"""docstring for main"""
if os.getuid() != 0:
sys.stderr.write('you must be root to run this script\n')
return
setup_environ()
configure_motd()
configure_cloud_init()
configure_bash()
configure_apt_sources()
upgrade_packages()
install_build_utils()
install_default_packages()
install_gridscheduler()
install_condor()
#install_torque()
install_pydrmaa()
install_blas_lapack()
install_numpy_scipy()
install_matplotlib()
install_pandas()
install_ipython()
install_mpi()
install_hadoop()
install_nfs()
install_julia()
configure_init()
cleanup()
if __name__ == '__main__':
main()
| lgpl-3.0 |
kimhungGCZ/combinedAL | study_armi_2_with_decay.py | 1 | 6337 | import numpy as np
import pandas as pd
from scipy.stats import norm
import statsmodels.api as sm
import matplotlib.pyplot as plt
import scripts.common_functions as cmfunc
import sklearn.neighbors as nb
import warnings
warnings.simplefilter('ignore')
def getCSVData(dataPath):
try:
data = pd.read_csv(dataPath)
    except IOError:
        print("Invalid path to data file.")
        return
return data
dataPath_result_bayes = './results/bayesChangePt/realKnownCause/bayesChangePt_dta_tsing.csv'
#dataPath_result = './results/relativeEntropy/realKnownCause/relativeEntropy_dta_tsing.csv'
dataPath_result_numenta = './results/numenta/realKnownCause/numenta_dta_tsing.csv'
#dataPath_result = './results/skyline/realKnownCause/skyline_dta_tsing.csv'
dataPath_raw = './data/realKnownCause/dta_tsing.csv'
result_dta_bayes = getCSVData(dataPath_result_bayes) if dataPath_result_bayes else None
result_dta_numenta = getCSVData(dataPath_result_numenta) if dataPath_result_numenta else None
raw_dta = getCSVData(dataPath_raw) if dataPath_raw else None
result_dta_numenta.anomaly_score[0:150] = np.min(result_dta_numenta.anomaly_score)
# first-order difference (first derivative)
der = cmfunc.change_after_k_seconds(raw_dta.value, k=1)
# second-order difference (second derivative)
sec_der = cmfunc.change_after_k_seconds(raw_dta.value, k=1)
median_sec_der = np.median(sec_der)
std_sec_der = np.std(sec_der)
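# Breakpoint scoring (descriptive note, inferred from the expressions below):
# keep only the amount by which each first difference exceeds the median by
# more than one standard deviation (everything else is clipped to zero), then
# min-max normalise the scores to [0, 1] and pad the front with a zero so the
# series lines up with the raw data.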
breakpoint_candidates = list(map(lambda x: (x[1] - median_sec_der) - np.abs(std_sec_der) if (x[1] - median_sec_der) - np.abs(std_sec_der) > 0 else 0, enumerate(sec_der)))
breakpoint_candidates = (breakpoint_candidates - np.min(breakpoint_candidates))/(np.max(breakpoint_candidates) - np.min(breakpoint_candidates))
breakpoint_candidates = np.insert(breakpoint_candidates, 0,0)
#result_dta = result_dta_numenta[150:400].copy()
result_dta = result_dta_numenta.copy()
#result_dta.anomaly_score = result_dta_numenta[150:400].anomaly_score + result_dta_bayes[150:400].anomaly_score #breakpoint_candidates # + 0.5 *
result_dta.anomaly_score = 0.3*result_dta_numenta.anomaly_score + 0.7*breakpoint_candidates #+ result_dta_bayes.anomaly_score #breakpoint_candidates # + 0.5 *
# plt.subplot(411)
# plt.plot(result_dta_bayes.anomaly_score)
# plt.subplot(412)
# plt.plot(result_dta_numenta.anomaly_score)
# plt.subplot(413)
# plt.plot(result_dta.anomaly_score)
# plt.subplot(414)
# plt.plot(raw_dta.value)
# plt.show()
dta_full = result_dta
# dta_noiss = dta_full.copy()
# dta_miss = dta_full.copy()
# dta_truth = raw_dta['truth'].values
#
# dta_noiss.value.index = result_dta.timestamp
# dta_miss.value.index = result_dta.timestamp
dta_full.value.index = result_dta.timestamp
#dta_reverse.value.index = dta_reverse.timestamp
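# Candidate selection (descriptive note): the highest-scoring 2% of points are
# taken as anomaly candidates and the lowest-scoring 20% as normal anchors.
# Despite its name, `five_percentage` holds the 2% cut-off count.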
five_percentage = int(0.02 * len(result_dta['anomaly_score']))
#anomaly_detected = np.array(result_dta.loc[result_dta['anomaly_score'] > 1].value.index)
np.argsort(result_dta['anomaly_score'])
#correct_detected = np.array(result_dta.loc[result_dta['anomaly_score'] == 0].value.index)
anomaly_index = np.array(np.argsort(result_dta['anomaly_score']))[-five_percentage:]
normal_index = np.array(np.argsort(result_dta['anomaly_score']))[:int((0.2* len(result_dta['anomaly_score'])) )]
#anomaly_index = [15, 143, 1860, 1700]
print("Anomaly Point Found", anomaly_index)
print("Correct Point Found", normal_index)
alpha = 0.05
Y = np.zeros(len(result_dta['anomaly_score']))
Z = np.zeros(len(result_dta['anomaly_score']))
X = list(map(lambda x: [x,result_dta.values[x][1]], np.arange(len(result_dta.values))))
tree = nb.KDTree(X, leaf_size=20)
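# Score spreading (sketch of the intent): each anomaly candidate pushes its
# anomaly score onto its KD-tree neighbours, decayed by `alpha` per
# neighbour-distance step and floored at zero (accumulated in Y); each normal
# anchor spreads (1 - score) the same way into Z. The two fields are combined
# with the raw scores further below.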
for anomaly_point in anomaly_index:
anomaly_neighboor = np.array(cmfunc.find_inverneghboor_of_point(tree, X, anomaly_point), dtype=np.int32)
for NN_pair in anomaly_neighboor:
Y[NN_pair[1]] = Y[NN_pair[1]] + result_dta['anomaly_score'][anomaly_point] - NN_pair[0] * alpha if result_dta['anomaly_score'][anomaly_point] - NN_pair[0] * alpha > 0 else Y[NN_pair[1]]
for normal_point in normal_index:
anomaly_neighboor = np.array(cmfunc.find_inverneghboor_of_point(tree, X, normal_point), dtype=np.int32)
for NN_pair in anomaly_neighboor:
Z[NN_pair[1]] = Z[NN_pair[1]] + (1-result_dta['anomaly_score'][normal_point]) - NN_pair[0] * alpha if (1-result_dta['anomaly_score'][normal_point]) - NN_pair[0] * alpha > 0 else Z[NN_pair[1]]
plt.subplot(411)
plt.plot(result_dta.anomaly_score, label='Metric of Score')
plt.legend(loc='upper center')
plt.subplot(412)
plt.plot(Y, label='Spreading Anomaly Score')
plt.legend(loc='upper center')
plt.subplot(413)
plt.plot(Z, label='Spreading Normal Score')
plt.legend(loc='upper center')
plt.subplot(414)
plt.plot(raw_dta.value, label='Final Score')
plt.legend(loc='upper center')
plt.show()
result_dta.anomaly_score = result_dta.anomaly_score + Y - Z
final_score = map(lambda x: 0 if x < 0 else x, result_dta.anomaly_score);
#final_score = (final_score - np.min(final_score))/(np.max(final_score) - np.min(final_score - np.min(final_score)))
plt.subplot(511)
plt.plot(raw_dta.value, label='Raw data')
plt.legend(loc='upper center')
plt.subplot(512)
plt.plot(result_dta_bayes.anomaly_score, label='Bayes Result')
plt.legend(loc='upper center')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,2))
plt.subplot(513)
plt.plot(result_dta_numenta.anomaly_score, label='Numenta Result')
plt.legend(loc='upper center')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,2))
# plt.subplot(514)
# plt.plot(breakpoint_candidates)
# plt.legend(loc='upper center')
# x1,x2,y1,y2 = plt.axis()
# plt.axis((x1,x2,0,2))
plt.subplot(515)
plt.plot(final_score, label='Our result')
plt.legend(loc='upper center')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,max(final_score)))
plt.show()
plt.subplot(511)
plt.plot(raw_dta.value[720:800], label='Raw data')
plt.legend(loc='upper center')
plt.subplot(512)
plt.plot(result_dta_bayes.anomaly_score[720:800], label='Bayes Result')
plt.legend(loc='upper center')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,2))
plt.subplot(513)
plt.plot(result_dta_numenta.anomaly_score[720:800], label='Numenta Result')
plt.legend(loc='upper center')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,2))
# plt.subplot(514)
# plt.plot(breakpoint_candidates[720:800])
# x1,x2,y1,y2 = plt.axis()
# plt.axis((x1,x2,0,2))
plt.subplot(515)
plt.plot(final_score[720:800], label='Our result')
plt.legend(loc='upper center')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,max(final_score[720:800])))
plt.show()
| agpl-3.0 |
Bam4d/neon | examples/timeseries_lstm.py | 1 | 14730 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Example that shows how to train on synthetic multi-dimensional time series
After training, the network is able to generate the sequences
Usage:
python examples/timeseries_lstm.py -e 10 -eval 1
Then look at the PNG plots generated.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
print 'matplotlib needs to be installed manually to generate plots needed for this example'
raise ImportError()
import numpy as np
import math
from neon.backends import gen_backend
from neon.initializers import GlorotUniform
from neon.layers import GeneralizedCost, LSTM, Affine, RecurrentLast
from neon.models import Model
from neon.optimizers import RMSProp
from neon.transforms import Logistic, Tanh, Identity, MeanSquared
from neon.callbacks.callbacks import Callbacks
from neon import NervanaObject
from neon.util.argparser import NeonArgparser, extract_valid_args
def rolling_window(a, lag):
"""
Convert a into time-lagged vectors
a : (n, p)
lag : time steps used for prediction
returns (n-lag+1, lag, p) array
(Building time-lagged vectors is not necessary for neon.)
"""
assert a.shape[0] > lag
shape = [a.shape[0] - lag + 1, lag, a.shape[-1]]
strides = [a.strides[0], a.strides[0], a.strides[-1]]
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
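# Illustrative example (not part of the original script): a (5, 1) series with
# lag=3 yields a (3, 3, 1) array of overlapping windows.
# >>> a = np.arange(5).reshape(5, 1)
# >>> rolling_window(a, 3)[:, :, 0]
# array([[0, 1, 2],
#        [1, 2, 3],
#        [2, 3, 4]])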
class TimeSeries(object):
def __init__(self, npoints=30, ncycles=3, divide=0.2, amplitude=1, curvetype='Lissajous1'):
"""
curvetype (str, optional): 'Lissajous1' or 'Lissajous2'
"""
self.nsamples = npoints * ncycles
self.x = np.linspace(0, ncycles * 2 * math.pi, self.nsamples)
if curvetype not in ('Lissajous1', 'Lissajous2'):
raise NotImplementedError()
        sin_scale = 2 if curvetype == 'Lissajous1' else 1
def y_x(x): return 4.0/5 * math.sin(x/sin_scale)
def y_y(x): return 4.0/5 * math.cos(x/2)
self.data = np.zeros((self.nsamples, 2))
self.data[:, 0] = np.asarray([y_x(xs) for xs in self.x]).astype(np.float32)
self.data[:, 1] = np.asarray([y_y(xs) for xs in self.x]).astype(np.float32)
L = len(self.data)
c = int(L * (1 - divide))
self.train = self.data[:c]
self.test = self.data[c:]
class DataIteratorSequence(NervanaObject):
"""
This class takes a sequence and returns an iterator providing data in batches suitable for RNN
prediction. Meant for use when the entire dataset is small enough to fit in memory.
"""
def __init__(self, X, time_steps, forward=1, return_sequences=True):
"""
Implements loading of given data into backend tensor objects. If the backend is specific
to an accelerator device, the data is copied over to that device.
Args:
X (ndarray): Input sequence with feature size within the dataset.
                Shape should be specified as (num examples, feature size)
time_steps (int): The number of examples to be put into one sequence.
forward (int, optional): how many forward steps the sequence should predict. default
is 1, which is the next example
return_sequences (boolean, optional): whether the target is a sequence or single step.
Also determines whether data will be formatted
as strides or rolling windows.
If true, target value be a sequence, input data
will be reshaped as strides. If false, target
value will be a single step, input data will be
a rolling_window
"""
self.seq_length = time_steps
self.forward = forward
self.batch_index = 0
self.nfeatures = self.nclass = X.shape[1]
self.nsamples = X.shape[0]
self.shape = (self.nfeatures, time_steps)
self.return_sequences = return_sequences
target_steps = time_steps if return_sequences else 1
# pre-allocate the device buffer to provide data for each minibatch
# buffer size is nfeatures x (times * batch_size), which is handled by backend.iobuf()
self.X_dev = self.be.iobuf((self.nfeatures, time_steps))
self.y_dev = self.be.iobuf((self.nfeatures, target_steps))
if return_sequences is True:
# truncate to make the data fit into multiples of batches
extra_examples = self.nsamples % (self.be.bsz * time_steps)
if extra_examples:
X = X[:-extra_examples]
# calculate how many batches
self.nsamples -= extra_examples
self.nbatches = self.nsamples / (self.be.bsz * time_steps)
self.ndata = self.nbatches * self.be.bsz * time_steps # no leftovers
# y is the lagged version of X
y = np.concatenate((X[forward:], X[:forward]))
self.y_series = y
# reshape this way so sequence is continuous along the batches
self.X = X.reshape(self.be.bsz, self.nbatches, time_steps, self.nfeatures)
self.y = y.reshape(self.be.bsz, self.nbatches, time_steps, self.nfeatures)
else:
self.X = rolling_window(X, time_steps)
self.X = self.X[:-1]
self.y = X[time_steps:]
self.nsamples = self.X.shape[0]
extra_examples = self.nsamples % (self.be.bsz)
if extra_examples:
self.X = self.X[:-extra_examples]
self.y = self.y[:-extra_examples]
# calculate how many batches
self.nsamples -= extra_examples
self.nbatches = self.nsamples / (self.be.bsz)
self.ndata = self.nbatches * self.be.bsz
self.y_series = self.y
Xshape = (self.nbatches, self.be.bsz, time_steps, self.nfeatures)
Yshape = (self.nbatches, self.be.bsz, 1, self.nfeatures)
self.X = self.X.reshape(Xshape).transpose(1, 0, 2, 3)
self.y = self.y.reshape(Yshape).transpose(1, 0, 2, 3)
def reset(self):
"""
For resetting the starting index of this dataset back to zero.
"""
self.batch_index = 0
def __iter__(self):
"""
Generator that can be used to iterate over this dataset.
Yields:
tuple : the next minibatch of data.
"""
self.batch_index = 0
while self.batch_index < self.nbatches:
# get the data for this batch and reshape to fit the device buffer shape
X_batch = self.X[:, self.batch_index].reshape(self.X_dev.shape[::-1]).T.copy()
y_batch = self.y[:, self.batch_index].reshape(self.y_dev.shape[::-1]).T.copy()
# make the data for this batch as backend tensor
self.X_dev.set(X_batch)
self.y_dev.set(y_batch)
self.batch_index += 1
yield self.X_dev, self.y_dev
# replicate neon's mse error metric
def err(y, t):
feature_axis = 1
return (0.5 * np.square(y - t).mean(axis=feature_axis).mean())
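# Worked example (illustrative): for y = np.array([[1., 1.]]) and
# t = np.array([[0., 0.]]) the squared errors are [1, 1], the per-feature mean
# is 1, and err returns 0.5 * 1 = 0.5.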
if __name__ == '__main__':
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--curvetype', default='Lissajous1', choices=['Lissajous1', 'Lissajous2'],
help='type of input curve data to use (Lissajous1 or Lissajous2)')
args = parser.parse_args(gen_be=False)
# network hyperparameters
hidden = 32
args.batch_size = 1
# The following flag will switch between 2 training strategies:
# 1. return_sequence True:
# Inputs are sequences, and target outputs will be sequences.
# The RNN layer's output at EVERY step will be used for errors and optimized.
# The RNN model contains a RNN layer and an Affine layer
# The data iterator will format the data accordingly, and will stride along the
# whole series with no overlap
# 2. return_sequence False:
# Inputs are sequences, and target output will be a single step.
# The RNN layer's output at LAST step will be used for errors and optimized.
    #    The RNN model contains an RNN layer, an RNN-output layer
    #    (e.g. RecurrentLast) and an Affine layer
# The data iterator will format the data accordingly, using a rolling window to go
# through the data
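    # A compact sketch of the two model variants defined further below
    # (nothing new is introduced here, only the layer stacks from this script):
    #   return_sequences=True  -> [LSTM, Affine]: the cost sees the output at
    #                             every timestep of each sequence
    #   return_sequences=False -> [LSTM, RecurrentLast, Affine]: only the last
    #                             timestep's output is used as the prediction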
return_sequences = False
    # Note that when the time series has higher or lower frequency, it requires
    # different amounts of data to learn the temporal pattern; the sequence
    # length and the batch size used for training also make a difference to
    # learning performance.
seq_len = 30
npoints = 10
ncycles = 100
num_predict = 200
seed_seq_len = 30
# ================= Main neon script ====================
be = gen_backend(**extract_valid_args(args, gen_backend))
# a file to save the trained model
if args.save_path is None:
args.save_path = 'timeseries.pkl'
if args.callback_args['save_path'] is None:
args.callback_args['save_path'] = args.save_path
if args.callback_args['serialize'] is None:
args.callback_args['serialize'] = 1
# create synthetic data as a whole series
time_series = TimeSeries(npoints, ncycles=ncycles, curvetype=args.curvetype)
# use data iterator to feed X, Y. return_sequence determines training strategy
train_set = DataIteratorSequence(time_series.train, seq_len, return_sequences=return_sequences)
valid_set = DataIteratorSequence(time_series.test, seq_len, return_sequences=return_sequences)
# define weights initialization
init = GlorotUniform() # Uniform(low=-0.08, high=0.08)
# define model: model is different for the 2 strategies (sequence target or not)
if return_sequences is True:
layers = [
LSTM(hidden, init, activation=Logistic(), gate_activation=Tanh(), reset_cells=False),
Affine(train_set.nfeatures, init, bias=init, activation=Identity())
]
else:
layers = [
LSTM(hidden, init, activation=Logistic(), gate_activation=Tanh(), reset_cells=True),
RecurrentLast(),
Affine(train_set.nfeatures, init, bias=init, activation=Identity())
]
model = Model(layers=layers)
cost = GeneralizedCost(MeanSquared())
optimizer = RMSProp(stochastic_round=args.rounding)
callbacks = Callbacks(model, train_set, eval_set=valid_set, **args.callback_args)
# fit model
model.fit(train_set,
optimizer=optimizer,
num_epochs=args.epochs,
cost=cost,
callbacks=callbacks)
# =======visualize how the model does on validation set==============
# run the trained model on train and valid dataset and see how the outputs match
train_output = model.get_outputs(train_set).reshape(-1, train_set.nfeatures)
valid_output = model.get_outputs(valid_set).reshape(-1, valid_set.nfeatures)
train_target = train_set.y_series
valid_target = valid_set.y_series
# calculate accuracy
terr = err(train_output, train_target)
verr = err(valid_output, valid_target)
print 'terr = %g, verr = %g' % (terr, verr)
plt.figure()
plt.plot(train_output[:, 0], train_output[:, 1], 'bo', label='prediction')
plt.plot(train_target[:, 0], train_target[:, 1], 'r.', label='target')
plt.legend()
plt.title('Neon on training set')
plt.savefig('neon_series_training_output.png')
plt.figure()
plt.plot(valid_output[:, 0], valid_output[:, 1], 'bo', label='prediction')
plt.plot(valid_target[:, 0], valid_target[:, 1], 'r.', label='target')
plt.legend()
    plt.title('Neon on validation set')
plt.savefig('neon_series_validation_output.png')
# =====================generate sequence ==================================
# when generating sequence, set sequence length to 1, since it doesn't make a difference
be.bsz = 1
seq_len = 1
if return_sequences is True:
layers = [
LSTM(hidden, init, activation=Logistic(), gate_activation=Tanh(), reset_cells=False),
Affine(train_set.nfeatures, init, bias=init, activation=Identity())
]
else:
layers = [
LSTM(hidden, init, activation=Logistic(), gate_activation=Tanh(), reset_cells=False),
RecurrentLast(),
Affine(train_set.nfeatures, init, bias=init, activation=Identity())
]
model_new = Model(layers=layers)
model_new.load_params(args.save_path)
model_new.initialize(dataset=(train_set.nfeatures, seq_len))
output = np.zeros((train_set.nfeatures, num_predict))
seed = time_series.train[:seed_seq_len]
x = model_new.be.empty((train_set.nfeatures, seq_len))
for s_in in seed:
x.set(s_in.reshape(train_set.nfeatures, seq_len))
y = model_new.fprop(x, inference=False)
for i in range(num_predict):
# Take last prediction and feed into next fprop
pred = y.get()[:, -1]
output[:, i] = pred
x[:] = pred.reshape(train_set.nfeatures, seq_len)
y = model_new.fprop(x, inference=False)
output_seq = np.vstack([seed, output.T])
plt.figure()
plt.plot(output_seq[:, 0], output_seq[:, 1], 'b.-', label='generated sequence')
plt.plot(seed[:, 0], seed[:, 1], 'r.', label='seed sequence')
plt.legend()
plt.title('neon generated sequence')
plt.savefig('neon_generated_sequence_2d.png')
plt.figure()
plt.plot(output_seq, 'b.-', label='generated sequence')
plt.plot(seed, 'r.', label='seed sequence')
plt.legend()
plt.title('neon generated sequence')
plt.savefig('neon_generated_sequence.png')
| apache-2.0 |
fabian-paul/PyEMMA | pyemma/coordinates/clustering/tests/util.py | 3 | 2962 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import numpy as np
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
"""
from scipy._lib._util import check_random_state
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
from bhmm._external.sklearn.utils import check_array
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
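# Example usage (illustrative, not part of the original module):
# >>> X, y = make_blobs(n_samples=9, n_features=2, centers=3, random_state=0)
# >>> X.shape
# (9, 2)
# >>> sorted(set(y))
# [0, 1, 2]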
| lgpl-3.0 |
vivekmishra1991/scikit-learn | sklearn/utils/tests/test_testing.py | 107 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired by numpy 1.7, with an alteration to check that
# the warning filters are reset after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
anirudhjayaraman/scikit-learn | sklearn/utils/tests/test_validation.py | 79 | 18547 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
@ignore_warnings
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
assert_warns(DeprecationWarning, check_array, [0, 1, 2])
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
    # But this works if the input data is forced to look like a 2D array with
# one sample and one feature:
X_checked = assert_warns(DeprecationWarning, check_array, [42],
ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
    # Simulate a case where a pipeline stage has trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
    # Check that a ValueError is raised when a non-estimator instance is passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
dboyliao/Scipy_Numpy_Learning | python_examples/scipy_352_ex1.py | 2 | 1996 | import numpy as np
from scipy.spatial.distance import pdist, squareform
import scipy.cluster.hierarchy as hy
import matplotlib.pyplot as plt
# Creating a cluster of clusters function
def clusters(number=20, cnumber=5, csize=10):
    # Note that the cluster centres are positioned with Gaussian randomness.
rnum = np.random.rand(cnumber, 2)
rn = rnum[:, 0] * number
rn = rn.astype(int)
rn[np.where(rn < 5)] = 5
rn[np.where(rn > number / 2.)] = round(number / 2., 0)
ra = rnum[:, 1] * 2.9
ra[np.where(ra < 1.5)] = 1.5
cls = np.random.randn(number, 3) * csize
# Random multipliers for central point of cluster
rxyz = np.random.randn(cnumber - 1, 3)
    for i in range(cnumber - 1):
tmp = np.random.randn(rn[i + 1], 3)
x = tmp[:, 0] + (rxyz[i, 0] * csize)
y = tmp[:, 1] + (rxyz[i, 1] * csize)
z = tmp[:, 2] + (rxyz[i, 2] * csize)
tmp = np.column_stack([x, y, z])
cls = np.vstack([cls, tmp])
return cls
# Generate a cluster of clusters and distance matrix.
cls = clusters()
D = pdist(cls[:, 0:2])
D = squareform(D)
# Compute and plot first dendrogram.
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_axes([0.09, 0.1, 0.2, 0.6])
Y1 = hy.linkage(D, method='complete')
cutoff = 0.3 * np.max(Y1[:, 2])
Z1 = hy.dendrogram(Y1, orientation='right', color_threshold=cutoff)
ax1.xaxis.set_visible(False)
ax1.yaxis.set_visible(False)
# Compute and plot second dendrogram.
ax2 = fig.add_axes([0.3, 0.71, 0.6, 0.2])
Y2 = hy.linkage(D, method='average')
cutoff = 0.3 * np.max(Y2[:, 2])
Z2 = hy.dendrogram(Y2, color_threshold=cutoff)
ax2.xaxis.set_visible(False)
ax2.yaxis.set_visible(False)
# Plot distance matrix.
ax3 = fig.add_axes([0.3, 0.1, 0.6, 0.6])
idx1 = Z1['leaves']
idx2 = Z2['leaves']
D = D[idx1, :]
D = D[:, idx2]
ax3.matshow(D, aspect='auto', origin='lower', cmap=plt.cm.YlGnBu)
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
# Save the figure.
fig.savefig('scipy_352_ex1.pdf', bbox_inches='tight')
| mit |
intuition-io/intuition | intuition/data/forex.py | 1 | 2448 | # -*- coding: utf-8 -*-
# vim:fenc=utf-8
'''
Forex access thanks to truefx.com
---------------------------------
:copyright (c) 2014 Xavier Bruhiere.
:license: Apache2.0, see LICENSE for more details.
'''
import os
import requests
import string
import random
from pandas import DataFrame, Series
import dna.logging
log = dna.logging.logger(__name__)
def _clean_pairs(pairs):
if not isinstance(pairs, list):
pairs = [pairs]
return ','.join(map(str.upper, pairs))
def _fx_mapping(raw_rates):
''' Map raw output to clearer labels '''
return {pair[0].lower(): {
'timeStamp': pair[1],
'bid': float(pair[2] + pair[3]),
'ask': float(pair[4] + pair[5]),
'high': float(pair[6]),
'low': float(pair[7])
} for pair in map(lambda x: x.split(','), raw_rates)}
class TrueFX(object):
_api_url = 'http://webrates.truefx.com/rates/connect.html'
_full_snapshot = 'y'
_output_format = 'csv'
def __init__(self, credentials='', pairs=[]):
if not credentials:
log.info('No credentials provided, inspecting environment')
credentials = os.environ.get('TRUEFX_API', ':')
self._user = credentials.split(':')[0]
self._pwd = credentials.split(':')[1]
self.state_pairs = _clean_pairs(pairs)
#self._qualifier = 'ozrates'
self._qualifier = ''.join(
random.choice(string.ascii_lowercase) for _ in range(8))
def connect(self):
payload = {
'u': self._user,
'p': self._pwd,
'q': self._qualifier,
'c': self.state_pairs,
'f': self._output_format,
's': self._full_snapshot
}
auth = requests.get(self._api_url, params=payload)
if auth.ok:
log.debug('Truefx authentification successful')
# Remove '\r\n'
self._session = auth.content[:-2]
return auth.ok
def query_rates(self, pairs=[]):
''' Perform a request against truefx data '''
# If no pairs, TrueFx will use the ones given the last time
payload = {'id': self._session}
if pairs:
payload['c'] = _clean_pairs(pairs)
response = requests.get(self._api_url, params=payload)
mapped_data = _fx_mapping(response.content.split('\n')[:-2])
return Series(mapped_data) if len(mapped_data) == 1 \
else DataFrame(mapped_data)
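# --- Hedged usage sketch (added; not part of the original module) ---
# Assumes a TRUEFX_API environment variable of the form "user:password";
# the currency pairs below are only illustrative.
if __name__ == '__main__':
    client = TrueFX(pairs=['EUR/USD', 'USD/JPY'])
    if client.connect():
        log.info(client.query_rates())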
| apache-2.0 |
liyu1990/sklearn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
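# Worked example of the transitive closure above (comment added for clarity):
# if the raw dump maps A -> B and B -> C, the loop rewrites the dictionary so
# that both A and B point directly at C.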
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
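# --- Hedged sanity check (added; not part of the original example) ---
# Run the same power-iteration helper on a tiny 3-page ring graph
# (0 -> 1 -> 2 -> 0); every page should receive the same score (~1/3).
X_toy = sparse.csr_matrix(np.array([[0, 1, 0],
                                    [0, 0, 1],
                                    [1, 0, 0]], dtype=np.float32))
print("Toy ring-graph centrality scores (expected ~1/3 each)")
print(centrality_scores(X_toy, max_iter=10))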
| bsd-3-clause |
maheshakya/scikit-learn | sklearn/neighbors/unsupervised.py | 16 | 3198 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
array([[2]])
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
| bsd-3-clause |
mikelane/FaceRecognition | Sklearn_Face_Recognition/SVM.py | 1 | 5114 |
# coding: utf-8
# In[25]:
from datetime import datetime
import logging
import itertools
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
from sklearn.svm import SVC
import PIL
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# In[11]:
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print('n_samples: {ns}'.format(ns=n_samples))
print('n_features: {nf}'.format(nf=n_features))
print('n_classes: {}'.format(n_classes))
# In[12]:
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=np.random.RandomState())
# In[13]:
n_components = 150
print("Extracting the top {nc} eigenfaces from {nf} faces".format(nc=n_components, nf=X_train.shape[0]))
start = datetime.now()
pca = PCA(n_components=n_components, svd_solver='randomized', whiten=True).fit(X_train)
print("done in {dur:.3f}s".format(dur=(datetime.now() - start).total_seconds()))
eigenfaces = pca.components_.reshape((n_components, h, w))
print('Projecting the input data on the eigenfaces orthonormal basis')
start = datetime.now()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in {dur:.3f}s".format(dur=(datetime.now() - start).total_seconds()))
# In[14]:
print('Fitting the classifier to the training set')
start = datetime.now()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print('done in {dur:.3f}s'.format(dur=(datetime.now() - start).total_seconds()))
print('Best estimator found by grid search:')
print(clf.best_estimator_)
# In[33]:
def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
"""
plt.figure(figsize=(7,7))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, fontsize=28)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45, fontsize=12)
plt.yticks(tick_marks, classes, fontsize=12)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
verticalalignment="center",
color="white" if cm[i, j] > thresh else "black", fontsize=18)
plt.tight_layout()
plt.ylabel('True label', fontsize=18)
plt.xlabel('Predicted label', fontsize=18)
plt.style.use('seaborn-dark')
plt.show()
# In[35]:
print("Predicting people's names on the test set")
start = datetime.now()
y_pred = clf.predict(X_test_pca)
print("done in {dur:.3f}s".format(dur=(datetime.now() - start).total_seconds()))
print('\nAccuracy: {:.2f}'.format(accuracy_score(y_test, y_pred)))
print(classification_report(y_test, y_pred, target_names=target_names))
cm = confusion_matrix(y_test, y_pred, labels=range(n_classes))
plot_confusion_matrix(cm=cm, classes=np.array(target_names))
# In[16]:
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
plt.style.use('seaborn-dark')
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
def title(y_pred, y_test, target_names, i):
"""Helper function to extract the prediction titles"""
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: {p}\ntrue: {t}'.format(p=pred_name, t=true_name)
# In[17]:
prediction_titles = [title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface {}".format(i) for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
# In[ ]:
| mit |
ky822/Data_Bootcamp | Code/Python/slides_indicators_FRED.py | 1 | 2821 | """
slides_indicators_fred.py
Global Economy course, handles the following:
* Loads data from FRED (uses the fred python module)
* cross-correlation function and graph
* business cycle scorecard (more graphs)
Written by: Trevor Barnett ([email protected])
Adapted from 'slides_indicators_FRED.R', written by Paul Backus and Espend Henriksen
USAGE: Just run the script. If 'DISPLAY' below is set to True, the script will display the graphs rather than save them
to the current working directory. This uses the API key provided by Kim Ruhl in the original R script. Make sure you
have the SciPy stack and pandas installed, as well as the 'fred' package (available from the PyPI repository).
"""
import fred
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import date,datetime
FRED_API_KEY="055ba538c874e5974ee22d786f27fdda"
FRED_SERIES=["INDPRO", "PAYEMS", "HOUST", "RRSFS", "NAPM"]
FRED_START=date(1990,1,1)
#set to false to output graphs to a file, uses current working directory. All numerical analysis is printed to the console
DISPLAY=False
fred.key(FRED_API_KEY)
def get_fred_series(series):
def filter(o):
return {'date': datetime.strptime(o['date'],'%Y-%m-%d').date(),
series: o['value']}
return pd.DataFrame(map(filter,fred.observations(series)['observations']),dtype='float64').set_index('date').dropna()
fred_data=get_fred_series(FRED_SERIES[0]) # Build an initial DataFrame
for s in FRED_SERIES[1:]:
fred_data=fred_data.join(get_fred_series(s))
fred_data=np.log(fred_data).diff(12)[FRED_START:].dropna()
def make_ccf_chart(ax,x,y,title):
ax.xcorr(fred_data[x],fred_data[y],maxlags=24)
ax.set_ylim(-1,1)
ax.set_xlim(-24,24)
ax.set_xlabel("Lag k relative to IP",fontsize=8)
ax.set_title(title,fontsize=12)
    ax.axvline(x=0, color='r')
plt.close('all')
fig,((ax1,ax2),(ax3,ax4))=plt.subplots(nrows=2,ncols=2)
make_ccf_chart(ax1,'PAYEMS','INDPRO','Nonfarm Eployment')
make_ccf_chart(ax2,'HOUST','INDPRO','Housing Starts')
make_ccf_chart(ax3,'RRSFS','INDPRO','Retail Sales')
make_ccf_chart(ax4,'NAPM','INDPRO','Purchasing Managers Index')
plt.tight_layout()
if DISPLAY:
plt.show()
else:
plt.savefig('ccf.png')
mean= fred_data.mean()
std=fred_data.std()
corr=fred_data.corr()
print(" == MEAN ==")
print(mean)
print(" == STD. DEV. ==")
print(std)
print(" == CORR ==")
print(corr)
def make_bcs_chart(ax,n,title):
ax.plot(fred_data.index,fred_data[n])
ax.set_title(title)
xlim,ylim=ax.get_xlim(),ax.get_ylim()
ax.fill_between(xlim,mean[n]+std[n],mean[n]-std[n],facecolor='yellow',alpha=0.5)
plt.close('all')
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1)
make_bcs_chart(ax1,'INDPRO','Industrial Production')
make_bcs_chart(ax2,'PAYEMS','Nonfarm Employment')
if DISPLAY:
plt.show()
else:
plt.savefig('bcs.png')
| mit |
RBlakes/MusicPredictor | graphaccuracy.py | 1 | 1147 | # graphs the accuracy of the predictions using
# the different combinations of attributes
import matplotlib.pyplot as plt;
plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
objects = ('All', 'Artist Hotness', 'Duration', 'End of Fade in', 'Key', 'Key Confidence', 'Loudness', 'Mode', 'Mode Confidence', 'Tempo', 'Start of Fade out', 'Time Signature', 'Time Signature Confidence', 'Year')
y_pos = np.arange(len(objects))
#performance = [97.27,95.74,97.06,96.46,96.9,97.03,97.14,96.31,95.77,97.25,97.1,97.06,97.1,96.65]
performance = [98.6,98.04,98.08,98.24,97.5,98.47,98.27,97.27,98.54,98.23,98.54,98.16,98.61,98.35]
rects = plt.bar(y_pos, performance, align='center', alpha=0.75)
plt.xticks(y_pos, objects, rotation=90)
plt.ylabel('Percentage of Accuracy')
plt.xlabel('Attributes')
plt.title('Prediction Accuracy for Attributes')
for rect in rects:
height = rect.get_height()
plt.text(rect.get_x() + rect.get_width()/2., height,
float(height),
ha='center', va='bottom')
plt.show() | gpl-3.0 |
olafhauk/mne-python | mne/evoked.py | 3 | 49843 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hämäläinen <[email protected]>
# Denis Engemann <[email protected]>
# Andrew Dykstra <[email protected]>
# Mads Jensen <[email protected]>
# Jona Sassenhagen <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
import numpy as np
from .baseline import rescale
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from .channels.layout import _merge_ch_data, _pair_grad_sensors
from .defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT
from .filter import detrend, FilterMixin
from .utils import (check_fname, logger, verbose, _time_mask, warn, sizeof_fmt,
SizeMixin, copy_function_doc_to_method_doc, _validate_type,
fill_doc, _check_option, ShiftTimeMixin, _build_data_frame,
_check_pandas_installed, _check_pandas_index_arguments,
_convert_times, _scale_dataframe_data, _check_time_format)
from .viz import (plot_evoked, plot_evoked_topomap, plot_evoked_field,
plot_evoked_image, plot_evoked_topo)
from .viz.evoked import plot_evoked_white, plot_evoked_joint
from .viz.topomap import _topomap_animation
from .io.constants import FIFF
from .io.open import fiff_open
from .io.tag import read_tag
from .io.tree import dir_tree_find
from .io.pick import pick_types, _picks_to_idx, _FNIRS_CH_TYPES_SPLIT
from .io.meas_info import read_meas_info, write_meas_info
from .io.proj import ProjMixin
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_string, write_float_matrix,
write_id, write_float, write_complex_float_matrix)
from .io.base import TimeMixin, _check_maxshield
_aspect_dict = {
'average': FIFF.FIFFV_ASPECT_AVERAGE,
'standard_error': FIFF.FIFFV_ASPECT_STD_ERR,
'single_epoch': FIFF.FIFFV_ASPECT_SINGLE,
'partial_average': FIFF.FIFFV_ASPECT_SUBAVERAGE,
'alternating_subaverage': FIFF.FIFFV_ASPECT_ALTAVERAGE,
'sample_cut_out_by_graph': FIFF.FIFFV_ASPECT_SAMPLE,
'power_density_spectrum': FIFF.FIFFV_ASPECT_POWER_DENSITY,
'dipole_amplitude_cuvre': FIFF.FIFFV_ASPECT_DIPOLE_WAVE,
'squid_modulation_lower_bound': FIFF.FIFFV_ASPECT_IFII_LOW,
'squid_modulation_upper_bound': FIFF.FIFFV_ASPECT_IFII_HIGH,
'squid_gate_setting': FIFF.FIFFV_ASPECT_GATE,
}
_aspect_rev = {val: key for key, val in _aspect_dict.items()}
@fill_doc
class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin, SetChannelsMixin,
InterpolationMixin, FilterMixin, TimeMixin, SizeMixin,
ShiftTimeMixin):
"""Evoked data.
Parameters
----------
fname : str
Name of evoked/average FIF file to load.
If None no data is loaded.
condition : int, or str
Dataset ID number (int) or comment/name (str). Optional if there is
only one data set in file.
proj : bool, optional
Apply SSP projection vectors.
kind : str
Either 'average' or 'standard_error'. The type of data to read.
Only used if 'condition' is a str.
allow_maxshield : bool | str (default False)
If True, allow loading of data that has been recorded with internal
active compensation (MaxShield). Data recorded with MaxShield should
generally not be loaded directly, but should first be processed using
SSS/tSSS to remove the compensation signals that may also affect brain
activity. Can also be "yes" to load without eliciting a warning.
%(verbose)s
Attributes
----------
info : dict
Measurement info.
ch_names : list of str
List of channels' names.
nave : int
Number of averaged epochs.
kind : str
Type of data, either average or standard_error.
comment : str
Comment on dataset. Can be the condition.
data : array of shape (n_channels, n_times)
Evoked response.
first : int
First time sample.
last : int
Last time sample.
tmin : float
The first time point in seconds.
tmax : float
The last time point in seconds.
times : array
Time vector in seconds. Goes from ``tmin`` to ``tmax``. Time interval
between consecutive time samples is equal to the inverse of the
sampling frequency.
%(verbose)s
Notes
-----
Evoked objects contain a single condition only.
"""
@verbose
def __init__(self, fname, condition=None, proj=True,
kind='average', allow_maxshield=False,
verbose=None): # noqa: D102
_validate_type(proj, bool, "'proj'")
# Read the requested data
self.info, self.nave, self._aspect_kind, self.comment, self.times, \
self.data = _read_evoked(fname, condition, kind, allow_maxshield)
self._update_first_last()
self.verbose = verbose
self.preload = True
# project and baseline correct
if proj:
self.apply_proj()
@property
def kind(self):
"""The data kind."""
return _aspect_rev[self._aspect_kind]
@kind.setter
def kind(self, kind):
_check_option('kind', kind, list(_aspect_dict.keys()))
self._aspect_kind = _aspect_dict[kind]
@property
def data(self):
"""The data matrix."""
return self._data
@data.setter
def data(self, data):
"""Set the data matrix."""
self._data = data
@verbose
def apply_baseline(self, baseline=(None, 0), *, verbose=None):
"""Baseline correct evoked data.
Parameters
----------
%(baseline_evoked)s
            Defaults to ``(None, 0)``, i.e. the beginning of the data until
time point zero.
%(verbose_meth)s
Returns
-------
evoked : instance of Evoked
The baseline-corrected Evoked object.
Notes
-----
Baseline correction can be done multiple times.
.. versionadded:: 0.13.0
"""
self.data = rescale(self.data, self.times, baseline, copy=False)
return self
def save(self, fname):
"""Save dataset to file.
Parameters
----------
fname : str
The name of the file, which should end with -ave.fif or
-ave.fif.gz.
Notes
-----
To write multiple conditions into a single file, use
:func:`mne.write_evokeds`.
"""
write_evokeds(fname, self)
def __repr__(self): # noqa: D105
s = "'%s' (%s, N=%s)" % (self.comment, self.kind, self.nave)
s += ", [%0.5g, %0.5g] sec" % (self.times[0], self.times[-1])
s += ", %s ch" % self.data.shape[0]
s += ", ~%s" % (sizeof_fmt(self._size),)
return "<Evoked | %s>" % s
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@property
def tmin(self):
"""First time point.
.. versionadded:: 0.21
"""
return self.times[0]
@property
def tmax(self):
"""Last time point.
.. versionadded:: 0.21
"""
return self.times[-1]
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None):
"""Crop data to a given time interval.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
%(verbose_meth)s
Returns
-------
evoked : instance of Evoked
The cropped Evoked object, modified in-place.
Notes
-----
%(notes_tmax_included_by_default)s
"""
mask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'],
include_tmax=include_tmax)
self.times = self.times[mask]
self._update_first_last()
self.data = self.data[:, mask]
return self
@verbose
def decimate(self, decim, offset=0, verbose=None):
"""Decimate the evoked data.
Parameters
----------
%(decim)s
%(decim_offset)s
%(verbose_meth)s
Returns
-------
evoked : instance of Evoked
The decimated Evoked object.
See Also
--------
Epochs.decimate
Epochs.resample
mne.io.Raw.resample
Notes
-----
%(decim_notes)s
.. versionadded:: 0.13.0
"""
decim, offset, new_sfreq = _check_decim(self.info, decim, offset)
start_idx = int(round(self.times[0] * (self.info['sfreq'] * decim)))
i_start = start_idx % decim + offset
decim_slice = slice(i_start, None, decim)
self.info['sfreq'] = new_sfreq
self.data = self.data[:, decim_slice].copy()
self.times = self.times[decim_slice].copy()
self._update_first_last()
return self
@copy_function_doc_to_method_doc(plot_evoked)
def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None,
xlim='tight', proj=False, hline=None, units=None, scalings=None,
titles=None, axes=None, gfp=False, window_title=None,
spatial_colors=False, zorder='unsorted', selectable=True,
noise_cov=None, time_unit='s', sphere=None, verbose=None):
return plot_evoked(
self, picks=picks, exclude=exclude, unit=unit, show=show,
ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units,
scalings=scalings, titles=titles, axes=axes, gfp=gfp,
window_title=window_title, spatial_colors=spatial_colors,
zorder=zorder, selectable=selectable, noise_cov=noise_cov,
time_unit=time_unit, sphere=sphere, verbose=verbose)
@copy_function_doc_to_method_doc(plot_evoked_image)
def plot_image(self, picks=None, exclude='bads', unit=True, show=True,
clim=None, xlim='tight', proj=False, units=None,
scalings=None, titles=None, axes=None, cmap='RdBu_r',
colorbar=True, mask=None, mask_style=None,
mask_cmap='Greys', mask_alpha=.25, time_unit='s',
show_names=None, group_by=None, sphere=None):
return plot_evoked_image(
self, picks=picks, exclude=exclude, unit=unit, show=show,
clim=clim, xlim=xlim, proj=proj, units=units, scalings=scalings,
titles=titles, axes=axes, cmap=cmap, colorbar=colorbar, mask=mask,
mask_style=mask_style, mask_cmap=mask_cmap, mask_alpha=mask_alpha,
time_unit=time_unit, show_names=show_names, group_by=group_by,
sphere=sphere)
@copy_function_doc_to_method_doc(plot_evoked_topo)
def plot_topo(self, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=[0.0], fig_background=None,
merge_grads=False, legend=True, axes=None,
background_color='w', noise_cov=None, show=True):
"""
Notes
-----
.. versionadded:: 0.10.0
"""
return plot_evoked_topo(
self, layout=layout, layout_scale=layout_scale, color=color,
border=border, ylim=ylim, scalings=scalings, title=title,
proj=proj, vline=vline, fig_background=fig_background,
merge_grads=merge_grads, legend=legend, axes=axes,
background_color=background_color, noise_cov=noise_cov, show=show)
@copy_function_doc_to_method_doc(plot_evoked_topomap)
def plot_topomap(self, times="auto", ch_type=None, vmin=None,
vmax=None, cmap=None, sensors=True, colorbar=True,
scalings=None, units=None, res=64,
size=1, cbar_fmt="%3.1f",
time_unit='s', time_format=None,
proj=False, show=True, show_names=False, title=None,
mask=None, mask_params=None, outlines='head',
contours=6, image_interp='bilinear', average=None,
axes=None, extrapolate=_EXTRAPOLATE_DEFAULT, sphere=None,
border=_BORDER_DEFAULT, nrows=1, ncols='auto'):
return plot_evoked_topomap(
self, times=times, ch_type=ch_type, vmin=vmin,
vmax=vmax, cmap=cmap, sensors=sensors, colorbar=colorbar,
scalings=scalings, units=units, res=res,
size=size, cbar_fmt=cbar_fmt, time_unit=time_unit,
time_format=time_format, proj=proj, show=show,
show_names=show_names, title=title, mask=mask,
mask_params=mask_params, outlines=outlines, contours=contours,
image_interp=image_interp, average=average,
axes=axes, extrapolate=extrapolate, sphere=sphere, border=border,
nrows=nrows, ncols=ncols)
@copy_function_doc_to_method_doc(plot_evoked_field)
def plot_field(self, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1, fig=None, vmax=None, n_contours=21, verbose=None):
return plot_evoked_field(self, surf_maps, time=time,
time_label=time_label, n_jobs=n_jobs,
fig=fig, vmax=vmax, n_contours=n_contours,
verbose=verbose)
@copy_function_doc_to_method_doc(plot_evoked_white)
def plot_white(self, noise_cov, show=True, rank=None, time_unit='s',
sphere=None, axes=None, verbose=None):
return plot_evoked_white(
self, noise_cov=noise_cov, rank=rank, show=show,
time_unit=time_unit, sphere=sphere, axes=axes, verbose=verbose)
@copy_function_doc_to_method_doc(plot_evoked_joint)
def plot_joint(self, times="peaks", title='', picks=None,
exclude='bads', show=True, ts_args=None,
topomap_args=None):
return plot_evoked_joint(self, times=times, title=title, picks=picks,
exclude=exclude, show=show, ts_args=ts_args,
topomap_args=topomap_args)
@fill_doc
def animate_topomap(self, ch_type=None, times=None, frame_rate=None,
butterfly=False, blit=True, show=True, time_unit='s',
sphere=None, *, extrapolate=_EXTRAPOLATE_DEFAULT,
verbose=None):
"""Make animation of evoked data as topomap timeseries.
The animation can be paused/resumed with left mouse button.
Left and right arrow keys can be used to move backward or forward
in time.
Parameters
----------
ch_type : str | None
Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg',
'hbo', 'hbr', 'fnirs_cw_amplitude',
'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', and 'fnirs_od'.
If None, first available channel type from the above list is used.
Defaults to None.
times : array of float | None
The time points to plot. If None, 10 evenly spaced samples are
calculated over the evoked time series. Defaults to None.
frame_rate : int | None
Frame rate for the animation in Hz. If None,
frame rate = sfreq / 10. Defaults to None.
butterfly : bool
Whether to plot the data as butterfly plot under the topomap.
Defaults to False.
blit : bool
Whether to use blit to optimize drawing. In general, it is
recommended to use blit in combination with ``show=True``. If you
intend to save the animation it is better to disable blit.
Defaults to True.
show : bool
Whether to show the animation. Defaults to True.
time_unit : str
The units for the time axis, can be "ms" (default in 0.16)
or "s" (will become the default in 0.17).
.. versionadded:: 0.16
%(topomap_sphere_auto)s
%(topomap_extrapolate)s
.. versionadded:: 0.22
%(verbose_meth)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
anim : instance of matplotlib.animation.FuncAnimation
Animation of the topomap.
Notes
-----
.. versionadded:: 0.12.0
"""
return _topomap_animation(
self, ch_type=ch_type, times=times, frame_rate=frame_rate,
butterfly=butterfly, blit=blit, show=show, time_unit=time_unit,
sphere=sphere, extrapolate=extrapolate, verbose=verbose)
def as_type(self, ch_type='grad', mode='fast'):
"""Compute virtual evoked using interpolated fields.
.. Warning:: Using virtual evoked to compute inverse can yield
unexpected results. The virtual channels have ``'_v'`` appended
at the end of the names to emphasize that the data contained in
them are interpolated.
Parameters
----------
ch_type : str
The destination channel type. It can be 'mag' or 'grad'.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used. ``'fast'`` should be sufficient
for most applications.
Returns
-------
evoked : instance of mne.Evoked
The transformed evoked object containing only virtual channels.
Notes
-----
This method returns a copy and does not modify the data it
operates on. It also returns an EvokedArray instance.
.. versionadded:: 0.9.0
"""
from .forward import _as_meg_type_inst
return _as_meg_type_inst(self, ch_type=ch_type, mode=mode)
@fill_doc
def detrend(self, order=1, picks=None):
"""Detrend data.
This function operates in-place.
Parameters
----------
order : int
Either 0 or 1, the order of the detrending. 0 is a constant
(DC) detrend, 1 is a linear detrend.
%(picks_good_data)s
Returns
-------
evoked : instance of Evoked
The detrended evoked object.
"""
picks = _picks_to_idx(self.info, picks)
self.data[picks] = detrend(self.data[picks], order, axis=-1)
return self
def copy(self):
"""Copy the instance of evoked.
Returns
-------
evoked : instance of Evoked
A copy of the object.
"""
evoked = deepcopy(self)
return evoked
def __neg__(self):
"""Negate channel responses.
Returns
-------
evoked_neg : instance of Evoked
The Evoked instance with channel data negated and '-'
prepended to the comment.
"""
out = self.copy()
out.data *= -1
out.comment = '-' + (out.comment or 'unknown')
return out
def get_peak(self, ch_type=None, tmin=None, tmax=None,
mode='abs', time_as_index=False, merge_grads=False,
return_amplitude=False):
"""Get location and latency of peak amplitude.
Parameters
----------
ch_type : str | None
The channel type to use. Defaults to None. If more than one sensor
            type is present in the data, the channel type has to be
            explicitly set.
tmin : float | None
The minimum point in time to be considered for peak getting.
If None (default), the beginning of the data is used.
tmax : float | None
The maximum point in time to be considered for peak getting.
If None (default), the end of the data is used.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
time_as_index : bool
Whether to return the time index instead of the latency in seconds.
merge_grads : bool
If True, compute peak from merged gradiometer data.
return_amplitude : bool
If True, return also the amplitude at the maximum response.
.. versionadded:: 0.16
Returns
-------
ch_name : str
The channel exhibiting the maximum response.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
amplitude : float
The amplitude of the maximum response. Only returned if
return_amplitude is True.
.. versionadded:: 0.16
""" # noqa: E501
supported = ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'misc',
'None') + _FNIRS_CH_TYPES_SPLIT
types_used = self.get_channel_types(unique=True, only_data_chs=True)
_check_option('ch_type', str(ch_type), supported)
if ch_type is not None and ch_type not in types_used:
raise ValueError('Channel type `{ch_type}` not found in this '
'evoked object.'.format(ch_type=ch_type))
elif len(types_used) > 1 and ch_type is None:
raise RuntimeError('More than one sensor type found. `ch_type` '
'must not be `None`, pass a sensor type '
'value instead')
if merge_grads:
if ch_type != 'grad':
raise ValueError('Channel type must be grad for merge_grads')
elif mode == 'neg':
raise ValueError('Negative mode (mode=neg) does not make '
'sense with merge_grads=True')
meg = eeg = misc = seeg = ecog = fnirs = False
picks = None
if ch_type in ('mag', 'grad'):
meg = ch_type
elif ch_type == 'eeg':
eeg = True
elif ch_type == 'misc':
misc = True
elif ch_type == 'seeg':
seeg = True
elif ch_type == 'ecog':
ecog = True
elif ch_type in _FNIRS_CH_TYPES_SPLIT:
fnirs = ch_type
if ch_type is not None:
if merge_grads:
picks = _pair_grad_sensors(self.info, topomap_coords=False)
else:
picks = pick_types(self.info, meg=meg, eeg=eeg, misc=misc,
seeg=seeg, ecog=ecog, ref_meg=False,
fnirs=fnirs)
data = self.data
ch_names = self.ch_names
if picks is not None:
data = data[picks]
ch_names = [ch_names[k] for k in picks]
if merge_grads:
data, _ = _merge_ch_data(data, ch_type, [])
ch_names = [ch_name[:-1] + 'X' for ch_name in ch_names[::2]]
ch_idx, time_idx, max_amp = _get_peak(data, self.times, tmin,
tmax, mode)
out = (ch_names[ch_idx], time_idx if time_as_index else
self.times[time_idx])
if return_amplitude:
out += (max_amp,)
return out
@fill_doc
def to_data_frame(self, picks=None, index=None,
scalings=None, copy=True, long_format=False,
time_format='ms'):
"""Export data in tabular structure as a pandas DataFrame.
Channels are converted to columns in the DataFrame. By default,
an additional column "time" is added, unless ``index='time'``
(in which case time values form the DataFrame's index).
Parameters
----------
%(picks_all)s
%(df_index_evk)s
Defaults to ``None``.
%(df_scalings)s
%(df_copy)s
%(df_longform_raw)s
%(df_time_format)s
.. versionadded:: 0.20
Returns
-------
%(df_return)s
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ['time']
valid_time_formats = ['ms', 'timedelta']
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
picks = _picks_to_idx(self.info, picks, 'all', exclude=())
data = self.data[picks, :]
times = self.times
data = data.T
if copy:
data = data.copy()
data = _scale_dataframe_data(self, data, picks, scalings)
# prepare extra columns / multiindex
mindex = list()
times = _convert_times(self, times, time_format)
mindex.append(('time', times))
# build DataFrame
df = _build_data_frame(self, data, picks, long_format, mindex, index,
default_index=['time'])
return df
def _check_decim(info, decim, offset):
"""Check decimation parameters."""
if decim < 1 or decim != int(decim):
raise ValueError('decim must be an integer > 0')
decim = int(decim)
new_sfreq = info['sfreq'] / float(decim)
lowpass = info['lowpass']
if decim > 1 and lowpass is None:
warn('The measurement information indicates data is not low-pass '
'filtered. The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (decim, new_sfreq))
elif decim > 1 and new_sfreq < 3 * lowpass:
warn('The measurement information indicates a low-pass frequency '
'of %g Hz. The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (lowpass, decim, new_sfreq)) # > 50% nyquist lim
offset = int(offset)
if not 0 <= offset < decim:
        raise ValueError('offset must be at least 0 and less than decim '
                         '(%s), got %s' % (decim, offset))
return decim, offset, new_sfreq
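# Worked example (comment added for clarity): with sfreq=1000 Hz and decim=4,
# new_sfreq is 250 Hz; a recorded lowpass of 40 Hz satisfies 250 >= 3 * 40,
# so no aliasing warning is emitted.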
@fill_doc
class EvokedArray(Evoked):
"""Evoked object from numpy array.
Parameters
----------
data : array of shape (n_channels, n_times)
The channels' evoked response. See notes for proper units of measure.
info : instance of Info
Info dictionary. Consider using ``create_info`` to populate
this structure.
tmin : float
Start time before event. Defaults to 0.
comment : str
Comment on dataset. Can be the condition. Defaults to ''.
nave : int
Number of averaged epochs. Defaults to 1.
kind : str
Type of data, either average or standard_error. Defaults to 'average'.
%(verbose)s
See Also
--------
EpochsArray, io.RawArray, create_info
Notes
-----
Proper units of measure:
* V: eeg, eog, seeg, emg, ecg, bio, ecog
* T: mag
* T/m: grad
* M: hbo, hbr
* Am: dipole
* AU: misc
"""
@verbose
def __init__(self, data, info, tmin=0., comment='', nave=1, kind='average',
verbose=None): # noqa: D102
dtype = np.complex128 if np.iscomplexobj(data) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 2:
raise ValueError('Data must be a 2D array of shape (n_channels, '
'n_samples), got shape %s' % (data.shape,))
if len(info['ch_names']) != np.shape(data)[0]:
raise ValueError('Info (%s) and data (%s) must have same number '
'of channels.' % (len(info['ch_names']),
np.shape(data)[0]))
self.data = data
self.first = int(round(tmin * info['sfreq']))
self.last = self.first + np.shape(data)[-1] - 1
self.times = np.arange(self.first, self.last + 1,
dtype=np.float64) / info['sfreq']
self.info = info.copy() # do not modify original info
self.nave = nave
self.kind = kind
self.comment = comment
self.picks = None
self.verbose = verbose
self.preload = True
self._projector = None
_validate_type(self.kind, "str", "kind")
if self.kind not in _aspect_dict:
raise ValueError('unknown kind "%s", should be "average" or '
'"standard_error"' % (self.kind,))
self._aspect_kind = _aspect_dict[self.kind]
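# Hedged construction sketch for EvokedArray (comment added; channel names
# and values are illustrative only):
#
#     info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=1000.,
#                            ch_types='eeg')
#     data = np.random.randn(2, 500) * 1e-6  # volts, (n_channels, n_times)
#     evoked = mne.EvokedArray(data, info, tmin=-0.1)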
def _get_entries(fid, evoked_node, allow_maxshield=False):
"""Get all evoked entries."""
comments = list()
aspect_kinds = list()
for ev in evoked_node:
for k in range(ev['nent']):
my_kind = ev['directory'][k].kind
pos = ev['directory'][k].pos
if my_kind == FIFF.FIFF_COMMENT:
tag = read_tag(fid, pos)
comments.append(tag.data)
my_aspect = _get_aspect(ev, allow_maxshield)[0]
for k in range(my_aspect['nent']):
my_kind = my_aspect['directory'][k].kind
pos = my_aspect['directory'][k].pos
if my_kind == FIFF.FIFF_ASPECT_KIND:
tag = read_tag(fid, pos)
aspect_kinds.append(int(tag.data))
comments = np.atleast_1d(comments)
aspect_kinds = np.atleast_1d(aspect_kinds)
if len(comments) != len(aspect_kinds) or len(comments) == 0:
fid.close()
raise ValueError('Dataset names in FIF file '
'could not be found.')
t = [_aspect_rev[a] for a in aspect_kinds]
t = ['"' + c + '" (' + tt + ')' for tt, c in zip(t, comments)]
t = '\n'.join(t)
return comments, aspect_kinds, t
def _get_aspect(evoked, allow_maxshield):
"""Get Evoked data aspect."""
is_maxshield = False
aspect = dir_tree_find(evoked, FIFF.FIFFB_ASPECT)
if len(aspect) == 0:
_check_maxshield(allow_maxshield)
aspect = dir_tree_find(evoked, FIFF.FIFFB_IAS_ASPECT)
is_maxshield = True
if len(aspect) > 1:
logger.info('Multiple data aspects found. Taking first one.')
return aspect[0], is_maxshield
def _get_evoked_node(fname):
"""Get info in evoked file."""
f, tree, _ = fiff_open(fname)
with f as fid:
_, meas = read_meas_info(fid, tree, verbose=False)
evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
return evoked_node
def _check_evokeds_ch_names_times(all_evoked):
evoked = all_evoked[0]
ch_names = evoked.ch_names
for ii, ev in enumerate(all_evoked[1:]):
if ev.ch_names != ch_names:
if set(ev.ch_names) != set(ch_names):
raise ValueError(
"%s and %s do not contain the same channels." % (evoked,
ev))
else:
warn("Order of channels differs, reordering channels ...")
ev = ev.copy()
ev.reorder_channels(ch_names)
all_evoked[ii + 1] = ev
if not np.max(np.abs(ev.times - evoked.times)) < 1e-7:
raise ValueError("%s and %s do not contain the same time instants"
% (evoked, ev))
return all_evoked
def combine_evoked(all_evoked, weights):
"""Merge evoked data by weighted addition or subtraction.
Each `~mne.Evoked` in ``all_evoked`` should have the same channels and the
same time instants. Subtraction can be performed by passing
``weights=[1, -1]``.
.. Warning::
Other than cases like simple subtraction mentioned above (where all
weights are -1 or 1), if you provide numeric weights instead of using
``'equal'`` or ``'nave'``, the resulting `~mne.Evoked` object's
``.nave`` attribute (which is used to scale noise covariance when
applying the inverse operator) may not be suitable for inverse imaging.
Parameters
----------
all_evoked : list of Evoked
The evoked datasets.
weights : list of float | 'equal' | 'nave'
The weights to apply to the data of each evoked instance, or a string
describing the weighting strategy to apply: ``'nave'`` computes
sum-to-one weights proportional to each object's ``nave`` attribute;
``'equal'`` weights each `~mne.Evoked` by ``1 / len(all_evoked)``.
Returns
-------
evoked : Evoked
The new evoked data.
Notes
-----
.. versionadded:: 0.9.0
"""
naves = np.array([evk.nave for evk in all_evoked], float)
if isinstance(weights, str):
_check_option('weights', weights, ['nave', 'equal'])
if weights == 'nave':
weights = naves / naves.sum()
else:
weights = np.ones_like(naves) / len(naves)
else:
weights = np.array(weights, float)
if weights.ndim != 1 or weights.size != len(all_evoked):
raise ValueError('weights must be the same size as all_evoked')
# cf. https://en.wikipedia.org/wiki/Weighted_arithmetic_mean, section on
# "weighted sample variance". The variance of a weighted sample mean is:
#
# σ² = w₁² σ₁² + w₂² σ₂² + ... + wₙ² σₙ²
#
# We estimate the variance of each evoked instance as 1 / nave to get:
#
# σ² = w₁² / nave₁ + w₂² / nave₂ + ... + wₙ² / naveₙ
#
# And our resulting nave is the reciprocal of this:
new_nave = 1. / np.sum(weights ** 2 / naves)
# This general formula is equivalent to formulae in Matti's manual
# (pp 128-129), where:
# new_nave = sum(naves) when weights='nave' and
# new_nave = 1. / sum(1. / naves) when weights are all 1.
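# Worked check of the formula above (illustrative numbers, not from data):
# with naves = [10, 30] and weights = [0.5, 0.5] (i.e. 'equal'),
# new_nave = 1 / (0.25 / 10 + 0.25 / 30) = 30; with weights='nave',
# weights = [0.25, 0.75] and new_nave = 1 / (0.0625 / 10 + 0.5625 / 30) = 40,
# which equals sum(naves) as stated above.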
all_evoked = _check_evokeds_ch_names_times(all_evoked)
evoked = all_evoked[0].copy()
# use union of bad channels
bads = list(set(b for e in all_evoked for b in e.info['bads']))
evoked.info['bads'] = bads
evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked))
evoked.nave = new_nave
evoked.comment = ' + '.join(f'{w:0.3f} × {e.comment or "unknown"}'
for w, e in zip(weights, all_evoked))
return evoked
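# Usage sketch (``ev_a`` and ``ev_b`` are hypothetical Evoked objects, e.g.
# returned by read_evokeds):
#     grand_avg = combine_evoked([ev_a, ev_b], weights='nave')
#     difference = combine_evoked([ev_a, ev_b], weights=[1, -1])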
@verbose
def read_evokeds(fname, condition=None, baseline=None, kind='average',
proj=True, allow_maxshield=False, verbose=None):
"""Read evoked dataset(s).
Parameters
----------
fname : str
The file name, which should end with -ave.fif or -ave.fif.gz.
condition : int or str | list of int or str | None
The index or list of indices of the evoked dataset to read. FIF files
can contain multiple datasets. If None, all datasets are returned as a
list.
%(baseline_evoked)s
Defaults to ``None``, i.e. no baseline correction.
kind : str
Either 'average' or 'standard_error', the type of data to read.
proj : bool
If False, available projectors won't be applied to the data.
allow_maxshield : bool | str (default False)
If True, allow loading of data that has been recorded with internal
active compensation (MaxShield). Data recorded with MaxShield should
generally not be loaded directly, but should first be processed using
SSS/tSSS to remove the compensation signals that may also affect brain
activity. Can also be "yes" to load without eliciting a warning.
%(verbose)s
Returns
-------
evoked : Evoked or list of Evoked
The evoked dataset(s); one Evoked if condition is int or str,
or list of Evoked if condition is None or list.
See Also
--------
write_evokeds
"""
check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz',
'_ave.fif', '_ave.fif.gz'))
logger.info('Reading %s ...' % fname)
return_list = True
if condition is None:
evoked_node = _get_evoked_node(fname)
condition = range(len(evoked_node))
elif not isinstance(condition, list):
condition = [condition]
return_list = False
out = [Evoked(fname, c, kind=kind, proj=proj,
allow_maxshield=allow_maxshield,
verbose=verbose).apply_baseline(baseline)
for c in condition]
return out if return_list else out[0]
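# Reading sketch (file name and condition are hypothetical):
#     evokeds = read_evokeds('sample-ave.fif')                # list of Evoked
#     aud_l = read_evokeds('sample-ave.fif', condition='Left Auditory',
#                          baseline=(None, 0))                # single Evoked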
def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False):
"""Read evoked data from a FIF file."""
if fname is None:
raise ValueError('No evoked filename specified')
f, tree, _ = fiff_open(fname)
with f as fid:
# Read the measurement info
info, meas = read_meas_info(fid, tree, clean_bads=True)
# Locate the data of interest
processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
if len(processed) == 0:
raise ValueError('Could not find processed data')
evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
if len(evoked_node) == 0:
raise ValueError('Could not find evoked data')
# find string-based entry
if isinstance(condition, str):
if kind not in _aspect_dict.keys():
raise ValueError('kind must be "average" or '
'"standard_error"')
comments, aspect_kinds, t = _get_entries(fid, evoked_node,
allow_maxshield)
goods = (np.in1d(comments, [condition]) &
np.in1d(aspect_kinds, [_aspect_dict[kind]]))
found_cond = np.where(goods)[0]
if len(found_cond) != 1:
raise ValueError('condition "%s" (%s) not found, out of '
'found datasets:\n%s'
% (condition, kind, t))
condition = found_cond[0]
elif condition is None:
if len(evoked_node) > 1:
_, _, conditions = _get_entries(fid, evoked_node,
allow_maxshield)
raise TypeError("Evoked file has more than one "
"condition, the condition parameters "
"must be specified from:\n%s" % conditions)
else:
condition = 0
if condition >= len(evoked_node) or condition < 0:
raise ValueError('Data set selector out of range')
my_evoked = evoked_node[condition]
# Identify the aspects
my_aspect, info['maxshield'] = _get_aspect(my_evoked, allow_maxshield)
# Now find the data in the evoked block
nchan = 0
sfreq = -1
chs = []
comment = last = first = first_time = nsamp = None
for k in range(my_evoked['nent']):
my_kind = my_evoked['directory'][k].kind
pos = my_evoked['directory'][k].pos
if my_kind == FIFF.FIFF_COMMENT:
tag = read_tag(fid, pos)
comment = tag.data
elif my_kind == FIFF.FIFF_FIRST_SAMPLE:
tag = read_tag(fid, pos)
first = int(tag.data)
elif my_kind == FIFF.FIFF_LAST_SAMPLE:
tag = read_tag(fid, pos)
last = int(tag.data)
elif my_kind == FIFF.FIFF_NCHAN:
tag = read_tag(fid, pos)
nchan = int(tag.data)
elif my_kind == FIFF.FIFF_SFREQ:
tag = read_tag(fid, pos)
sfreq = float(tag.data)
elif my_kind == FIFF.FIFF_CH_INFO:
tag = read_tag(fid, pos)
chs.append(tag.data)
elif my_kind == FIFF.FIFF_FIRST_TIME:
tag = read_tag(fid, pos)
first_time = float(tag.data)
elif my_kind == FIFF.FIFF_NO_SAMPLES:
tag = read_tag(fid, pos)
nsamp = int(tag.data)
if comment is None:
comment = 'No comment'
# Local channel information?
if nchan > 0:
if chs is None:
raise ValueError('Local channel information was not found '
'when it was expected.')
if len(chs) != nchan:
raise ValueError('Number of channels and number of '
'channel definitions are different')
info['chs'] = chs
logger.info(' Found channel information in evoked data. '
'nchan = %d' % nchan)
if sfreq > 0:
info['sfreq'] = sfreq
# Read the data in the aspect block
nave = 1
epoch = []
for k in range(my_aspect['nent']):
kind = my_aspect['directory'][k].kind
pos = my_aspect['directory'][k].pos
if kind == FIFF.FIFF_COMMENT:
tag = read_tag(fid, pos)
comment = tag.data
elif kind == FIFF.FIFF_ASPECT_KIND:
tag = read_tag(fid, pos)
aspect_kind = int(tag.data)
elif kind == FIFF.FIFF_NAVE:
tag = read_tag(fid, pos)
nave = int(tag.data)
elif kind == FIFF.FIFF_EPOCH:
tag = read_tag(fid, pos)
epoch.append(tag)
nepoch = len(epoch)
if nepoch != 1 and nepoch != info['nchan']:
raise ValueError('Number of epoch tags is unreasonable '
'(nepoch = %d nchan = %d)'
% (nepoch, info['nchan']))
if nepoch == 1:
# Only one epoch
data = epoch[0].data
# May need a transpose if the number of channels is one
if data.shape[1] == 1 and info['nchan'] == 1:
data = data.T
else:
# Put the old style epochs together
data = np.concatenate([e.data[None, :] for e in epoch], axis=0)
if np.isrealobj(data):
data = data.astype(np.float64)
else:
data = data.astype(np.complex128)
if first_time is not None and nsamp is not None:
times = first_time + np.arange(nsamp) / info['sfreq']
elif first is not None:
nsamp = last - first + 1
times = np.arange(first, last + 1) / info['sfreq']
else:
raise RuntimeError('Could not read time parameters')
del first, last
if nsamp is not None and data.shape[1] != nsamp:
raise ValueError('Incorrect number of samples (%d instead of '
' %d)' % (data.shape[1], nsamp))
logger.info(' Found the data of interest:')
logger.info(' t = %10.2f ... %10.2f ms (%s)'
% (1000 * times[0], 1000 * times[-1], comment))
if info['comps'] is not None:
logger.info(' %d CTF compensation matrices available'
% len(info['comps']))
logger.info(' nave = %d - aspect type = %d'
% (nave, aspect_kind))
# Calibrate
cals = np.array([info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0)
for k in range(info['nchan'])])
data *= cals[:, np.newaxis]
return info, nave, aspect_kind, comment, times, data
def write_evokeds(fname, evoked):
"""Write an evoked dataset to a file.
Parameters
----------
fname : str
The file name, which should end with -ave.fif or -ave.fif.gz.
evoked : Evoked instance, or list of Evoked instances
The evoked dataset, or list of evoked datasets, to save in one file.
Note that the measurement info from the first evoked instance is used,
so be sure that information matches.
See Also
--------
read_evokeds
"""
_write_evokeds(fname, evoked)
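# Round-trip sketch (file name is hypothetical; the file written below holds
# exactly two datasets, so read_evokeds returns a list of two Evoked objects):
#     write_evokeds('output-ave.fif', [ev_a, ev_b])
#     ev_a2, ev_b2 = read_evokeds('output-ave.fif')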
def _write_evokeds(fname, evoked, check=True):
"""Write evoked data."""
from .epochs import _compare_epochs_infos
if check:
check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz',
'_ave.fif', '_ave.fif.gz'))
if not isinstance(evoked, list):
evoked = [evoked]
warned = False
# Create the file and save the essentials
with start_file(fname) as fid:
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if evoked[0].info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info['meas_id'])
# Write measurement info
write_meas_info(fid, evoked[0].info)
# One or more evoked data sets
start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
for ei, e in enumerate(evoked):
if ei:
_compare_epochs_infos(evoked[0].info, e.info, f'evoked[{ei}]')
start_block(fid, FIFF.FIFFB_EVOKED)
# Comment is optional
if e.comment is not None and len(e.comment) > 0:
write_string(fid, FIFF.FIFF_COMMENT, e.comment)
# First time, num. samples, first and last sample
write_float(fid, FIFF.FIFF_FIRST_TIME, e.times[0])
write_int(fid, FIFF.FIFF_NO_SAMPLES, len(e.times))
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first)
write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last)
# The epoch itself
if e.info.get('maxshield'):
aspect = FIFF.FIFFB_IAS_ASPECT
else:
aspect = FIFF.FIFFB_ASPECT
start_block(fid, aspect)
write_int(fid, FIFF.FIFF_ASPECT_KIND, e._aspect_kind)
# convert nave to integer to comply with FIFF spec
nave_int = int(round(e.nave))
if nave_int != e.nave and not warned:
warn('converting "nave" to integer before saving evoked; this '
'can have a minor effect on the scale of source '
'estimates that are computed using "nave".')
warned = True
write_int(fid, FIFF.FIFF_NAVE, nave_int)
del nave_int
decal = np.zeros((e.info['nchan'], 1))
for k in range(e.info['nchan']):
decal[k] = 1.0 / (e.info['chs'][k]['cal'] *
e.info['chs'][k].get('scale', 1.0))
if np.iscomplexobj(e.data):
write_function = write_complex_float_matrix
else:
write_function = write_float_matrix
write_function(fid, FIFF.FIFF_EPOCH, decal * e.data)
end_block(fid, aspect)
end_block(fid, FIFF.FIFFB_EVOKED)
end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
def _get_peak(data, times, tmin=None, tmax=None, mode='abs'):
"""Get feature-index and time of maximum signal from 2D array.
Note. This is a 'getter', not a 'finder'. For non-evoked type
data and continuous signals, please use proper peak detection algorithms.
Parameters
----------
data : instance of numpy.ndarray (n_locations, n_times)
The data, either evoked in sensor or source space.
times : instance of numpy.ndarray (n_times)
The times in seconds.
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
Returns
-------
max_loc : int
The index of the feature with the maximum value.
max_time : int
The index of the time point at which the maximum response occurs.
max_amp : float
Amplitude of the maximum response.
"""
_check_option('mode', mode, ['abs', 'neg', 'pos'])
if tmin is None:
tmin = times[0]
if tmax is None:
tmax = times[-1]
if tmin < times.min():
raise ValueError('The tmin value is out of bounds. It must be '
'within {} and {}'.format(times.min(), times.max()))
if tmax > times.max():
raise ValueError('The tmax value is out of bounds. It must be '
'within {} and {}'.format(times.min(), times.max()))
if tmin > tmax:
raise ValueError('The tmin must be smaller or equal to tmax')
time_win = (times >= tmin) & (times <= tmax)
mask = np.ones_like(data).astype(bool)
mask[:, time_win] = False
maxfun = np.argmax
if mode == 'pos':
if not np.any(data > 0):
raise ValueError('No positive values encountered. Cannot '
'operate in pos mode.')
elif mode == 'neg':
if not np.any(data < 0):
raise ValueError('No negative values encountered. Cannot '
'operate in neg mode.')
maxfun = np.argmin
masked_index = np.ma.array(np.abs(data) if mode == 'abs' else data,
mask=mask)
max_loc, max_time = np.unravel_index(maxfun(masked_index), data.shape)
return max_loc, max_time, data[max_loc, max_time]
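# Minimal sketch of the peak lookup above (hypothetical array):
#     data = np.array([[0., 2., -5.], [1., 0., 3.]])
#     times = np.array([0., 0.1, 0.2])
#     _get_peak(data, times, mode='abs')   # -> (0, 2, -5.0)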
| bsd-3-clause |
ninoxcello/mscs710-project | src/utils.py | 1 | 2465 | """
Author: Patrick Handley
Description: Helper functions. Ex: loading data, plotting forecast trend
"""
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing, cross_validation
def load_data(input_dir, file_name, forecast_col='Close'):
"""
Read a data CSV file and split it into training and test sets with an 80/20 ratio.
Args:
input_dir: path to directory containing data csv files.
file_name: name of currency file to be used.
forecast_col: feature to be used as target.
Returns:
x_train: training samples.
y_train: training labels.
x_test: test samples.
y_test: test labels.
x_recent: subset used for prediction
df: pandas dataframe for currency
"""
# read in csv
df = pd.read_csv('{}/{}'.format(input_dir, file_name), parse_dates=['Date'], index_col=0)
# select & add feature columns
df.fillna(0, inplace=True)
df = df[['Open', 'High', 'Low', 'Close']]
df['HL_PCT'] = (df['High'] - df['Low']) / df['Close'] * 100.
df['PCT_Change'] = (df['Close'] - df['Open']) / df['Open'] * 100.
df = df.iloc[::-1]
df.fillna(value=-9999, inplace=True)
# set # of days to forecast out and shift column to be used as labels
days_forecast = 15
df['label'] = df[forecast_col].shift(-days_forecast)
# set up feature & label matrices
X = np.array(df.drop(['label'], 1))
X = preprocessing.scale(X)
x_recent = X[-days_forecast:]
X = X[:-days_forecast]
df.dropna(inplace=True)
y = np.array(df['label'])
# split data 80/20 for train & test respectively
x_train, x_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
return x_train, x_test, x_recent, y_train, y_test, df
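# Usage sketch (directory and file name are hypothetical; the CSV must contain
# Date, Open, High, Low and Close columns):
#     x_tr, x_te, x_recent, y_tr, y_te, df = load_data('data', 'bitcoin.csv')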
def forecast_plot(df, predictions):
""" show plot of historical values as well as prediction values. """
df['Forecast'] = np.nan
last_date = df.iloc[-1].name
last_unix = last_date.timestamp()
one_day = 86400 # sec in day
next_unix = last_unix + one_day
for i in predictions:
next_date = datetime.datetime.fromtimestamp(next_unix)
next_unix += one_day
df.loc[next_date] = [np.nan for _ in range(len(df.columns) - 1)] + [i]
plt.plot(df['Close'])
plt.plot(df['Forecast'])
plt.legend(bbox_to_anchor=(1.01, 1))
plt.xlabel('Time(Yr-M)')
plt.ylabel('Value(USD)')
plt.show()
| mit |
Djabbz/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
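# Example sketch (not part of the original module): a Ledoit-Wolf estimate on
# random data.
#     import numpy as np
#     X = np.random.RandomState(0).randn(100, 5)
#     cov = LedoitWolf().fit(X).covariance_   # shape (5, 5)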
| bsd-3-clause |
bmcfee/mir_eval | tests/mpl_ic.py | 3 | 12171 | # CREATED:2015-02-17 14:41:28 by Brian McFee <[email protected]>
# this function is lifted wholesale from matploblib v1.4.2,
# and modified so that images are stored explicitly under the tests path
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import gc
import os
import sys
import shutil
import warnings
import unittest
import nose
import numpy as np
import matplotlib.units
from matplotlib import cbook
from matplotlib import ticker
from matplotlib import pyplot as plt
from matplotlib import ft2font
from matplotlib.testing.noseclasses import KnownFailure
from matplotlib.testing.exceptions import ImageComparisonFailure
from matplotlib.testing.compare import comparable_formats, compare_images, \
make_test_filename
def knownfailureif(fail_condition, msg=None, known_exception_class=None):
"""
Assume a test will fail if *fail_condition* is True. *fail_condition*
may also be False or the string 'indeterminate'.
*msg* is the error message displayed for the test.
If *known_exception_class* is not None, the failure is only known
if the exception is an instance of this class. (Default = None)
"""
# based on numpy.testing.dec.knownfailureif
if msg is None:
msg = 'Test known to fail'
def known_fail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
def failer(*args, **kwargs):
try:
# Always run the test (to generate images).
result = f(*args, **kwargs)
except Exception as err:
if fail_condition:
if known_exception_class is not None:
if not isinstance(err, known_exception_class):
# This is not the expected exception
raise
# (Keep the next ultra-long comment so it shows in
# the console.)
# An error here when running nose means that you don't have
# the matplotlib.testing.noseclasses:KnownFailure plugin in
# use.
raise KnownFailure(msg)
else:
raise
if fail_condition and fail_condition != 'indeterminate':
raise KnownFailureDidNotFailTest(msg)
return result
return nose.tools.make_decorator(f)(failer)
return known_fail_decorator
def _do_cleanup(original_units_registry):
plt.close('all')
gc.collect()
import matplotlib.testing
matplotlib.testing.setup()
matplotlib.units.registry.clear()
matplotlib.units.registry.update(original_units_registry)
warnings.resetwarnings() # reset any warning filters set in tests
class KnownFailureDidNotFailTest(KnownFailure):
pass
class CleanupTest(object):
@classmethod
def setup_class(cls):
cls.original_units_registry = matplotlib.units.registry.copy()
@classmethod
def teardown_class(cls):
_do_cleanup(cls.original_units_registry)
def test(self):
self._func()
class CleanupTestCase(unittest.TestCase):
'''A wrapper for unittest.TestCase that includes cleanup operations'''
@classmethod
def setUpClass(cls):
import matplotlib.units
cls.original_units_registry = matplotlib.units.registry.copy()
@classmethod
def tearDownClass(cls):
_do_cleanup(cls.original_units_registry)
def cleanup(func):
@functools.wraps(func)
def wrapped_function(*args, **kwargs):
original_units_registry = matplotlib.units.registry.copy()
try:
func(*args, **kwargs)
finally:
_do_cleanup(original_units_registry)
return wrapped_function
def check_freetype_version(ver):
if ver is None:
return True
from distutils import version
if isinstance(ver, six.string_types):
ver = (ver, ver)
ver = [version.StrictVersion(x) for x in ver]
found = version.StrictVersion(ft2font.__freetype_version__)
return found >= ver[0] and found <= ver[1]
class ImageComparisonTest(CleanupTest):
@classmethod
def setup_class(cls):
CleanupTest.setup_class()
cls._func()
@staticmethod
def remove_text(figure):
figure.suptitle("")
for ax in figure.get_axes():
ax.set_title("")
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
try:
ax.zaxis.set_major_formatter(ticker.NullFormatter())
ax.zaxis.set_minor_formatter(ticker.NullFormatter())
except AttributeError:
pass
def test(self):
baseline_dir, result_dir = _image_directories(self._func)
for fignum, baseline in zip(plt.get_fignums(), self._baseline_images):
for extension in self._extensions:
will_fail = extension not in comparable_formats()
if will_fail:
fail_msg = ('Cannot compare %s files on this system' %
extension)
else:
fail_msg = 'No failure expected'
orig_expected_fname = (
os.path.join(baseline_dir, baseline) + '.' + extension)
if (extension == 'eps' and
not os.path.exists(orig_expected_fname)):
orig_expected_fname = (
os.path.join(baseline_dir, baseline) + '.pdf')
expected_fname = make_test_filename(os.path.join(
result_dir,
os.path.basename(orig_expected_fname)), 'expected')
actual_fname = (
os.path.join(result_dir, baseline) + '.' + extension)
if os.path.exists(orig_expected_fname):
shutil.copyfile(orig_expected_fname, expected_fname)
else:
will_fail = True
fail_msg = 'Do not have baseline image %s' % expected_fname
@knownfailureif(
will_fail, fail_msg,
known_exception_class=ImageComparisonFailure)
def do_test():
figure = plt.figure(fignum)
if self._remove_text:
self.remove_text(figure)
figure.savefig(actual_fname, **self._savefig_kwarg)
plt.close(figure)
err = compare_images(expected_fname, actual_fname,
self._tol, in_decorator=True)
try:
if not os.path.exists(expected_fname):
raise ImageComparisonFailure(
'image does not exist: %s' % expected_fname)
if err:
raise ImageComparisonFailure(
'images not close: %(actual)s vs. %(expected)s'
' (RMS %(rms).3f)' % err)
except ImageComparisonFailure:
if not check_freetype_version(self._freetype_version):
raise KnownFailure(
"Mismatched version of freetype. Test "
"requires '%s', you have '%s'" %
(self._freetype_version,
ft2font.__freetype_version__))
raise
yield (do_test,)
def image_comparison(baseline_images=None, extensions=None, tol=13,
freetype_version=None, remove_text=False,
savefig_kwarg=None):
"""
call signature::
image_comparison(baseline_images=['my_figure'], extensions=None)
Compare images generated by the test with those specified in
*baseline_images*, which must correspond; otherwise an
ImageComparisonFailure exception will be raised.
Keyword arguments:
*baseline_images*: list
A list of strings specifying the names of the images generated
by calls to :meth:`matplotlib.figure.savefig`.
*extensions*: [ None | list ]
If *None*, default to all supported extensions.
Otherwise, a list of extensions to test. For example ['png','pdf'].
*tol*: (default 13)
The RMS threshold above which the test is considered failed.
*freetype_version*: str or tuple
The expected freetype version or range of versions for this
test to pass.
*remove_text*: bool
Remove the title and tick text from the figure before
comparison. This does not remove other, more deliberate,
text, such as legends and annotations.
*savefig_kwarg*: dict
Optional arguments that are passed to the savefig method.
"""
if baseline_images is None:
raise ValueError('baseline_images must be specified')
if extensions is None:
# default extensions to test
extensions = ['png', 'pdf', 'svg']
if savefig_kwarg is None:
# default no kwargs to savefig
savefig_kwarg = dict()
def compare_images_decorator(func):
# We want to run the setup function (the actual test function
# that generates the figure objects) only once for each type
# of output file. The only way to achieve this with nose
# appears to be to create a test class with "setup_class" and
# "teardown_class" methods. Creating a class instance doesn't
# work, so we use type() to actually create a class and fill
# it with the appropriate methods.
name = func.__name__
# For nose 1.0, we need to rename the test function to
# something without the word "test", or it will be run as
# well, outside of the context of our image comparison test
# generator.
func = staticmethod(func)
func.__get__(1).__name__ = str('_private')
new_class = type(
name,
(ImageComparisonTest,),
{'_func': func,
'_baseline_images': baseline_images,
'_extensions': extensions,
'_tol': tol,
'_freetype_version': freetype_version,
'_remove_text': remove_text,
'_savefig_kwarg': savefig_kwarg})
return new_class
return compare_images_decorator
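# Usage sketch of the decorator above (baseline image name is hypothetical; a
# matching image must exist under baseline_images/ for the comparison to pass):
#     @image_comparison(baseline_images=['simple_plot'], extensions=['png'])
#     def test_simple_plot():
#         plt.figure()
#         plt.plot([0, 1], [0, 1])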
def _image_directories(func):
"""
Compute the baseline and result image directories for testing *func*.
Create the result directory if it doesn't exist.
"""
module_name = func.__module__
# mods = module_name.split('.')
# mods.pop(0) # <- will be the name of the package being tested (in
# most cases "matplotlib")
# assert mods.pop(0) == 'tests'
# subdir = os.path.join(*mods)
subdir = module_name
import imp
def find_dotted_module(module_name, path=None):
"""A version of imp which can handle dots in the module name"""
res = None
for sub_mod in module_name.split('.'):
try:
res = file, path, _ = imp.find_module(sub_mod, path)
path = [path]
if file is not None:
file.close()
except ImportError:
# assume namespace package
path = sys.modules[sub_mod].__path__
res = None, path, None
return res
mod_file = find_dotted_module(func.__module__)[1]
basedir = os.path.dirname(mod_file)
baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
result_dir = os.path.abspath(os.path.join('result_images', subdir))
if not os.path.exists(result_dir):
cbook.mkdirs(result_dir)
return baseline_dir, result_dir
| mit |
mdeff/ntds_2017 | projects/reports/stackoverflow_survey/functions.py | 1 | 2492 | import numpy as np
from sklearn import linear_model
def crossvad_build_k_indices(y, k_fold, seed = 1):
"""build k indices for k-fold."""
num_row = y.shape[0]
interval = int(num_row / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_row)
k_indices = [indices[k * interval: (k + 1) * interval]
for k in range(k_fold)]
return np.array(k_indices)
def cross_validation(y, x, k_indices, k_fold):
"""use cross validation on method.
return the mean of the mse error of the training sets and of the testing sets
as well as the accuracy and the variance (over kfold) of the accuracy"""
# ***************************************************
#creating list of possible k's
k_list=np.arange(k_indices.shape[0])
# define lists to store the w, the loss of training data and the loss of test data
mse_tr_list = np.zeros(k_fold)
mse_te_list = np.zeros(k_fold)
accuracy_te_list = np.zeros(k_fold)
y_pr_stack = np.zeros(len(y))
for k in range(0,k_fold):
# get k'th subgroup in test, others in train
y_te = y[k_indices[k]]
x_te = x[k_indices[k]]
y_tr = y[np.ravel(k_indices[k_list[k*np.ones(len(k_list))!=k_list]])]
x_tr = x[np.ravel(k_indices[k_list[k*np.ones(len(k_list))!=k_list]])]
#standardize the data
x_tr, mean_tr, std_tr = standardize(x_tr)
x_te = standardize_given(x_te, mean_tr, std_tr)
#logistic regression
logreg = linear_model.LogisticRegression(solver ='liblinear', class_weight ='balanced')
logreg.fit(x_tr, y_tr)
y_pr = logreg.predict(x_te)
y_pr_stack[int(k*len(y)/k_fold):int((k+1)*len(y)/k_fold)] = y_pr
accuracy_te_list[k] = sum(np.equal(y_pr,y_te))/len(y_te)
mse_tr_mean = np.mean(mse_tr_list)
mse_te_mean = np.mean(mse_te_list)
accuracy_te_mean = np.mean(accuracy_te_list)
accuracy_te_var = np.std(accuracy_te_list)
return y_pr_stack, accuracy_te_mean, accuracy_te_var
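# Usage sketch (X and y are hypothetical feature and label arrays):
#     k_fold = 5
#     k_indices = crossvad_build_k_indices(y, k_fold)
#     y_pred, acc_mean, acc_std = cross_validation(y, X, k_indices, k_fold)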
def standardize(x):
"""Standardize the original data set."""
#standardize is done feature by feature to have equal weights.
mean_x = np.mean(x,axis=0)
x = x - mean_x
std_x = np.std(x,axis=0)
x = x / std_x
return x, mean_x, std_x
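# Worked sketch: standardize(np.array([[1., 2.], [3., 4.]])) returns columns
# with zero mean and unit variance, i.e. [[-1., -1.], [1., 1.]].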
def standardize_given(x, mean_x, std_x):
"""Standardize the original data set with given mean_x and std_x."""
x = x - mean_x
x = x / std_x #handle outliers
return x
| mit |
rohanp/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
ahmadia/bokeh | examples/charts/file/scatter.py | 37 | 1607 |
from collections import OrderedDict
import pandas as pd
from bokeh.charts import Scatter, output_file, show, vplot
from bokeh.sampledata.iris import flowers
setosa = flowers[(flowers.species == "setosa")][["petal_length", "petal_width"]]
versicolor = flowers[(flowers.species == "versicolor")][["petal_length", "petal_width"]]
virginica = flowers[(flowers.species == "virginica")][["petal_length", "petal_width"]]
xyvalues = OrderedDict([("setosa", setosa.values), ("versicolor", versicolor.values), ("virginica", virginica.values)])
scatter1 = Scatter(xyvalues, title="iris dataset, dict_input", xlabel="petal_length",
ylabel="petal_width", legend='top_left', marker="triangle")
groupped_df = flowers[["petal_length", "petal_width", "species"]].groupby("species")
scatter2 = Scatter(groupped_df, title="iris dataset, dict_input", xlabel="petal_length",
ylabel="petal_width", legend='top_left')
pdict = OrderedDict()
for i in groupped_df.groups.keys():
labels = groupped_df.get_group(i).columns
xname = labels[0]
yname = labels[1]
x = getattr(groupped_df.get_group(i), xname)
y = getattr(groupped_df.get_group(i), yname)
pdict[i] = list(zip(x, y))
df = pd.DataFrame(pdict)
scatter3 = Scatter(
df, title="iris dataset, dict_input",
xlabel="petal_length", ylabel="petal_width", legend='top_left')
scatter4 = Scatter(
list(xyvalues.values()), title="iris dataset, dict_input",
xlabel="petal_length", ylabel="petal_width", legend='top_left')
output_file("scatter.html")
show(vplot(scatter1, scatter2, scatter3, scatter4))
| bsd-3-clause |
ephes/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
RachitKansal/scikit-learn | sklearn/utils/tests/test_multiclass.py | 128 | 12853 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequences of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
herilalaina/scikit-learn | sklearn/datasets/tests/test_20news.py | 75 | 3266 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
try:
datasets.fetch_20newsgroups(subset='all',
download_if_missing=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# test subset = train
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 130107))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
# test subset = test
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 130107))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
# test subset = all
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 130107))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
ahellander/pyurdme | examples/yeast_polarization/polarisome_model.py | 5 | 4149 | #!/usr/bin/env python
""" pyURDME model file for the model found in Lawson et al. PloS Comp Bio (2013). """
import os
import pyurdme
import dolfin
import math
import matplotlib.pyplot as plt
import numpy
class Cdc42(pyurdme.URDMEDataFunction):
def __init__(self, a=-4*numpy.pi, b=4*numpy.pi, N=160):
""" 1D domain from a to b. """
pyurdme.URDMEDataFunction.__init__(self, name="Cdc42")
self.a = a
self.b = b
self.N = N
def map(self, x):
#ligand_c[i] = 100*Gradient_max*exp( (-1*pow((i-floor(N/2))*360.0/N,2))/(2*pow(Gradient_sigma,2)) );
# x[0] == i*l
Gradient_max = 3.0*160/self.N
Gradient_max = Gradient_max*0.7917
Gradient_sigma = 20.3837
return 100*Gradient_max*numpy.exp(
-1*((x[0]*(360)/(self.b - self.a))**2) / (2*Gradient_sigma**2)
)
class polarisome_1D(pyurdme.URDMEModel):
def __init__(self,model_name="polarisome_1D"):
pyurdme.URDMEModel.__init__(self,model_name)
default_D = 0.0053
fast_D = 1000*default_D
# Species
Bni1c = pyurdme.Species(name="Bni1c", diffusion_constant=fast_D)
Bni1m = pyurdme.Species(name="Bni1m", diffusion_constant=default_D)
Spa2c = pyurdme.Species(name="Spa2c", diffusion_constant=fast_D)
Spa2m = pyurdme.Species(name="Spa2m", diffusion_constant=default_D)
Actinc = pyurdme.Species(name="Actinc", diffusion_constant=fast_D)
Actinm = pyurdme.Species(name="Actinm", diffusion_constant=default_D)
self.add_species([Bni1c, Bni1m, Spa2c, Spa2m, Actinc, Actinm])
NUM_VOXEL = 160
self.mesh = pyurdme.URDMEMesh.generate_interval_mesh(nx=NUM_VOXEL, a=-4*numpy.pi, b=4*numpy.pi, periodic=True)
Bon = pyurdme.Parameter(name="Bon", expression=1.6e-6)
Boff = pyurdme.Parameter(name="Boff", expression=0.25)
Bfb = pyurdme.Parameter(name="Bfb", expression=1.9e-5)
Aon = pyurdme.Parameter(name="Aon", expression=7.7e-5)
Aoff = pyurdme.Parameter(name="Aoff", expression=0.018)
Km = pyurdme.Parameter(name="Km", expression=3500)
Son = pyurdme.Parameter(name="Son", expression=0.16)
Soff = pyurdme.Parameter(name="Soff", expression=0.35)
self.add_parameter([Bon, Boff, Bfb, Aon, Aoff, Km, Son, Soff])
# Add Data Function to model the mating pheromone gradient.
self.add_data_function(Cdc42())
# Reactions
R0 = pyurdme.Reaction(name="R0", reactants={Bni1c:1}, products={Bni1m:1}, propensity_function="Bon*Bni1c*NUM_VOXELS*Cdc42")
R1 = pyurdme.Reaction(name="R1", reactants={Bni1m:1}, products={Bni1c:1}, massaction=True, rate=Boff)
R2 = pyurdme.Reaction(name="R2", reactants={Actinc:1}, products={Actinm:1}, propensity_function="Aon*Bni1m*Actinc*NUM_VOXELS")
R3 = pyurdme.Reaction(name="R3", reactants={Actinm:1}, products={Actinc:1}, propensity_function="Aoff*Km/(Km+Spa2m)*Actinm")
R4 = pyurdme.Reaction(name="R4", reactants={Spa2c:1}, products={Spa2m:1}, propensity_function="Son*Spa2c*NUM_VOXELS*Actinm")
R5 = pyurdme.Reaction(name="R5", reactants={Spa2m:1}, products={Spa2c:1}, massaction=True, rate=Soff)
R6 = pyurdme.Reaction(name="R6", reactants={Bni1c:1}, products={Bni1m:1}, propensity_function="Bfb*Bni1c*NUM_VOXELS*Spa2m")
self.add_reaction([R0,R1,R2,R3,R4,R5,R6])
# Distribute molecules randomly over the mesh according to their initial values
self.set_initial_condition_scatter({Bni1c:1000})
self.set_initial_condition_scatter({Spa2c:5000})
self.set_initial_condition_scatter({Actinc:40})
#self.timespan(range(0,3601,30))
self.timespan(range(0,201,10))
if __name__=="__main__":
""" Dump model to a file. """
model = polarisome_1D()
result = model.run()
x_vals = model.mesh.coordinates()[:, 0]
Bni1 = result.get_species("Bni1m", timepoints=20)
Spa2 = result.get_species("Spa2m", timepoints=20)
plt.plot(x_vals, Spa2)
plt.title('Spa2_m at t={0}'.format(model.tspan[20]))
plt.show()
| gpl-3.0 |
d-lee/airflow | airflow/hooks/base_hook.py | 18 | 2571 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def get_connections(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
session.expunge_all()
session.close()
return db
@classmethod
def get_connection(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
else:
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
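# Usage sketch (the connection id is hypothetical and must exist in the Airflow
# metadata database or as an AIRFLOW_CONN_MY_POSTGRES environment variable):
#     conn = BaseHook.get_connection('my_postgres')
#     hook = BaseHook.get_hook('my_postgres')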
| apache-2.0 |
ishanic/scikit-learn | sklearn/svm/tests/test_sparse.py | 32 | 12988 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
hh-italian-group/h-tautau | Instruments/python/create_cache_splitting.py | 1 | 1072 | #!/usr/bin/env python
# Create splitting for cache jobs.
# This file is part of https://github.com/hh-italian-group/h-tautau.
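# Example invocation (the summary file name below is a hypothetical placeholder):
#   python create_cache_splitting.py --input prod_summary.csv --max-exe-time 6.0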
import argparse
import os
import numpy as np
import pandas
parser = argparse.ArgumentParser(description='Create splitting for cache jobs')
parser.add_argument('--input', type=str, required=True, help="Input csv file with summary")
parser.add_argument('--max-exe-time', type=float, required=True, help="Maximal desired execution time in hours")
args = parser.parse_args()
def create_splits(df, max_exe_time):
jobs = {}
n_splits = np.ceil(df.exeTime_h.values / max_exe_time)
for n in range(df.shape[0]):
if n_splits[n] > 1:
job_name = df.index.values[n]
jobs[job_name] = int(n_splits[n])
return jobs
df = pandas.read_csv(args.input)
df['exeTime_h'] = df.exeTime / (60. * 60.)
df['job_name'] = df.sample_name + '_' + df.channel
df_gr = df.groupby(['job_name'])
jobs = create_splits(df_gr.sum(), args.max_exe_time)
for job_name in sorted(jobs):
print('{} {}'.format(job_name, jobs[job_name]))
| gpl-2.0 |
ClimbsRocks/scikit-learn | sklearn/exceptions.py | 35 | 4329 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
"""
class ChangedBehaviorWarning(UserWarning):
"""Warning class used to notify the user of any change in the behavior."""
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataConversionWarning(UserWarning):
"""Warning used to notify implicit data conversions happening in the code.
This warning occurs when some input data needs to be converted or
interpreted in a way that may not match the user's expectations.
For example, this warning may occur when the user
- passes an integer array to a function which expects float input and
will convert the input
- requests a non-copying operation, but a copy is required to meet the
implementation's data-type expectations;
- passes an input whose shape can be interpreted ambiguously.
"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality.
For example, in random projection, this warning is raised when the
number of components, which quantifies the dimensionality of the target
projection space, is higher than the number of features, which quantifies
the dimensionality of the original source space, to imply that the
dimensionality of the problem will not be reduced.
"""
class EfficiencyWarning(UserWarning):
"""Warning used to notify the user of inefficient computation.
This warning notifies the user that the efficiency may not be optimal due
to some reason which may be included as a part of the warning message.
This may be subclassed into a more specific Warning class.
"""
class FitFailedWarning(RuntimeWarning):
"""Warning class used if there is an error while fitting the estimator.
This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
and the cross-validation helper function cross_val_score to warn when there
is an error while fitting the estimator.
Examples
--------
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import FitFailedWarning
>>> import warnings
>>> warnings.simplefilter('always', FitFailedWarning)
>>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
>>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
>>> with warnings.catch_warnings(record=True) as w:
... try:
... gs.fit(X, y) # This will raise a ValueError since C is < 0
... except ValueError:
... pass
... print(repr(w[-1].message))
... # doctest: +NORMALIZE_WHITESPACE
FitFailedWarning("Classifier fit failed. The score on this train-test
partition for these parameters will be set to 0.000000. Details:
\\nValueError('Penalty term must be positive; got (C=-2)',)",)
"""
class NonBLASDotWarning(EfficiencyWarning):
"""Warning used when the dot operation does not use BLAS.
This warning is used to notify the user that BLAS was not used for dot
operation and hence the efficiency may be affected.
"""
class UndefinedMetricWarning(UserWarning):
"""Warning used when the metric is invalid"""
| bsd-3-clause |
dmccloskey/SBaaS_isotopomer | SBaaS_isotopomer/main.py | 1 | 4464 | import sys
sys.path.append('C:/Users/dmccloskey-sbrg/Documents/GitHub/SBaaS_base')
from SBaaS_base.postgresql_settings import postgresql_settings
from SBaaS_base.postgresql_orm import postgresql_orm
# read in the settings file
filename = 'C:/Users/dmccloskey-sbrg/Google Drive/SBaaS_settings/settings_metabolomics.ini';
pg_settings = postgresql_settings(filename);
# connect to the database from the settings file
pg_orm = postgresql_orm();
pg_orm.set_sessionFromSettings(pg_settings.database_settings);
session = pg_orm.get_session();
engine = pg_orm.get_engine();
# your app...
sys.path.append(pg_settings.datadir_settings['github']+'/SBaaS_LIMS')
sys.path.append(pg_settings.datadir_settings['github']+'/SBaaS_isotopomer')
sys.path.append(pg_settings.datadir_settings['github']+'/io_utilities')
sys.path.append(pg_settings.datadir_settings['github']+'/MDV_utilities')
sys.path.append(pg_settings.datadir_settings['github']+'/molmass')
sys.path.append(pg_settings.datadir_settings['github']+'/matplotlib_utilities')
sys.path.append(pg_settings.datadir_settings['github']+'/quantification_analysis')
sys.path.append(pg_settings.datadir_settings['github']+'/python_statistics')
sys.path.append(pg_settings.datadir_settings['github']+'/r_statistics')
sys.path.append(pg_settings.datadir_settings['github']+'/listDict')
sys.path.append(pg_settings.datadir_settings['github']+'/ddt_python')
# TODO:
# 1 control 12-C experiment (use: C:\Users\dmccloskey-sbrg\Documents\GitHub\sbaas_workspace\sbaas_workspace\workspace\isotopomer\WTEColi12C02.py)
#make the normalized methods tables
from SBaaS_isotopomer.stage01_isotopomer_normalized_execute import stage01_isotopomer_normalized_execute
normalized01 = stage01_isotopomer_normalized_execute(session,engine,pg_settings.datadir_settings);
## update the database from .csv
#normalized01.import_dataStage01IsotopomerNormalized_update('C:/Users/dmccloskey-sbrg/Desktop/dataStage01IsotopomerNormalized_WTEColi_113C80_U13C20_01_backup.csv');
## export the data to excel
#normalized01.export_dataStage01IsotopomerNormalized_csv('WTEColi_113C80_U13C20_01',
# filename_O = pg_settings.datadir_settings['workspace_data']+'/_output/WTEColi_113C80_U13C20_01_averagesNormSum.csv',
# sample_name_abbreviation_I='%',
# time_point_I='%',
# scan_type_I='EPI',
# met_id_I='%')
#export spectrums to js
##TODO: bug in plots
#normalized01.export_dataStage01IsotopomerNormalized_js('ALEsKOs01',
# sample_name_abbreviations_I=[
# "OxicEvo04sdhCBEvo01EPEcoli13CGlc",
# ],
# met_ids_I=[],
# scan_types_I=[],
# single_plot_I = False,
# );
#make the averages methods tables
from SBaaS_isotopomer.stage01_isotopomer_averages_execute import stage01_isotopomer_averages_execute
ave01 = stage01_isotopomer_averages_execute(session,engine,pg_settings.datadir_settings);
#ave01.import_dataStage01IsotopomerAveragesNormSum_updateUsedAndComment('C:/Users/dmccloskey-sbrg/Desktop/dataStage01IsotopomerAveragesNormSum_WTEColi_113C80_U13C20_01_update.csv');
## export the spectrums to .js
#ave01.export_dataStage01IsotopomerAveragesNormSum_csv('WTEColi_113C80_U13C20_01',pg_settings.datadir_settings['workspace_data']+'/_output/WTEColi_113C80_U13C20_01_averagesNormSum.csv');
# plot specific scan-types and met_ids
ave01.export_dataStage01IsotopomerAveragesNormSum_js('ALEsKOs01',
sample_name_abbreviations_I=[
"OxicEvo04ptsHIcrrEcoli13CGlc",
"OxicEvo04ptsHIcrrEvo01EPEcoli13CGlc",
],
# met_ids_I=[
# #'fad',
# 'pyr',
# 'phpyr',
# ],
#scan_types_I=['EPI'],
single_plot_I = False,
);
##make the spectrumAccuracy methods tables
#from SBaaS_isotopomer.stage01_isotopomer_spectrumAccuracy_execute import stage01_isotopomer_spectrumAccuracy_execute
#specaccuracy01 = stage01_isotopomer_spectrumAccuracy_execute(session,engine,pg_settings.datadir_settings);
#specaccuracy01.drop_dataStage01_isotopomer_spectrumAccuracy();
#specaccuracy01.initialize_dataStage01_isotopomer_spectrumAccuracy();
#specaccuracy01.reset_dataStage01_isotopomer_spectrumAccuracy('chemoCLim01');
##make the QC methods tables
#from SBaaS_isotopomer.stage01_isotopomer_QCs_execute import stage01_isotopomer_QCs_execute
#exqcs01 = stage01_isotopomer_QCs_execute(session,engine,pg_settings.datadir_settings);
#exqcs01.drop_dataStage01_isotopomer_QCs();
#exqcs01.initialize_dataStage01_isotopomer_QCs();
#exqcs01.reset_dataStage01_isotopomer_QCs('chemoCLim01'); | mit |
madjelan/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
StongeEtienne/dipy | doc/examples/reconst_dti.py | 4 | 9303 | """
============================================================
Reconstruction of the diffusion signal with the Tensor model
============================================================
The diffusion tensor model is a model that describes the diffusion within a
voxel. First proposed by Basser and colleagues [Basser1994]_, it has been very
influential in demonstrating the utility of diffusion MRI in characterizing the
micro-structure of white matter tissue and the biophysical properties of
tissue, as inferred from local diffusion properties, and it is still very
commonly used.
The diffusion tensor models the diffusion signal as:
.. math::
\frac{S(\mathbf{g}, b)}{S_0} = e^{-b\mathbf{g}^T \mathbf{D} \mathbf{g}}
Where $\mathbf{g}$ is a unit vector in 3-space indicating the direction of
measurement and $b$ encodes the measurement parameters, such as the strength
and duration of the diffusion-weighting gradient. $S(\mathbf{g}, b)$ is the
measured diffusion-weighted signal and $S_0$ is the signal acquired in a
measurement with no diffusion weighting. $\mathbf{D}$ is a positive-definite quadratic
form, which contains six free parameters to be fit. These six parameters are:
.. math::
\mathbf{D} = \begin{pmatrix} D_{xx} & D_{xy} & D_{xz} \\
D_{yx} & D_{yy} & D_{yz} \\
D_{zx} & D_{zy} & D_{zz} \\ \end{pmatrix}
This matrix is a variance/covariance matrix of the diffusivity along the three
spatial dimensions. Note that we can assume that diffusivity has antipodal
symmetry, so elements across the diagonal are equal. For example:
$D_{xy} = D_{yx}$. This is why there are only 6 free parameters to estimate
here.
In the following example we show how to reconstruct your diffusion datasets
using a single tensor model.
First import the necessary modules:
``numpy`` is for numerical computation
"""
import numpy as np
"""
``nibabel`` is for loading imaging datasets
"""
import nibabel as nib
"""
``dipy.reconst`` is for the reconstruction algorithms which we use to create
voxel models from the raw data.
"""
import dipy.reconst.dti as dti
"""
``dipy.data`` is used for small datasets that we use in tests and examples.
"""
from dipy.data import fetch_stanford_hardi
"""
Fetch will download the raw dMRI dataset of a single subject. The size of the
dataset is 87 MBytes. You only need to fetch once.
"""
fetch_stanford_hardi()
"""
Next, we read the saved dataset
"""
from dipy.data import read_stanford_hardi
img, gtab = read_stanford_hardi()
"""
img contains a nibabel Nifti1Image object (with the data) and gtab contains a
GradientTable object (information about the gradients e.g. b-values and
b-vectors).
"""
data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
data.shape ``(81, 106, 76, 160)``
First of all, we mask and crop the data. This is a quick way to avoid
calculating Tensors on the background of the image. This is done using dipy's
mask module.
"""
from dipy.segment.mask import median_otsu
maskdata, mask = median_otsu(data, 3, 1, True,
vol_idx=range(10, 50), dilate=2)
print('maskdata.shape (%d, %d, %d, %d)' % maskdata.shape)
"""
maskdata.shape ``(72, 87, 59, 160)``
Now that we have prepared the datasets we can go forward with the voxel
reconstruction. First, we instantiate the Tensor model in the following way.
"""
tenmodel = dti.TensorModel(gtab)
"""
Fitting the data is very simple. We just need to call the fit method of the
TensorModel in the following way:
"""
tenfit = tenmodel.fit(maskdata)
"""
The fit method creates a TensorFit object which contains the fitting parameters
and other attributes of the model. For example we can generate fractional
anisotropy (FA) from the eigen-values of the tensor. FA is used to characterize
the degree to which the distribution of diffusion in a voxel is
directional. That is, whether there is relatively unrestricted diffusion in one
particular direction.
Mathematically, FA is defined as the normalized variance of the eigen-values of
the tensor:
.. math::
FA = \sqrt{\frac{1}{2}\frac{(\lambda_1-\lambda_2)^2+(\lambda_1-
\lambda_3)^2+(\lambda_2-\lambda_3)^2}{\lambda_1^2+
\lambda_2^2+\lambda_3^2}}
Note that FA should be interpreted carefully. It may be an indication of the
density of packing of fibers in a voxel, and the amount of myelin wrapping these
axons, but it is not always a measure of "tissue integrity". For example, FA
may decrease in locations in which there is fanning of white matter fibers, or
where more than one population of white matter fibers crosses.
"""
print('Computing anisotropy measures (FA, MD, RGB)')
from dipy.reconst.dti import fractional_anisotropy, color_fa, lower_triangular
FA = fractional_anisotropy(tenfit.evals)
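"""
As a quick cross-check of the formula above (this snippet is not part of the
original example), FA can also be computed directly from the eigenvalues:
"""

ev1, ev2, ev3 = tenfit.evals[..., 0], tenfit.evals[..., 1], tenfit.evals[..., 2]
with np.errstate(divide='ignore', invalid='ignore'):
    FA_manual = np.sqrt(0.5 * ((ev1 - ev2) ** 2 + (ev1 - ev3) ** 2 + (ev2 - ev3) ** 2)
                        / (ev1 ** 2 + ev2 ** 2 + ev3 ** 2))
# Outside the background (where all eigenvalues are zero and the ratio is 0/0)
# this agrees with the FA returned by dipy's fractional_anisotropy above.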
"""
In the background of the image the fitting will not be accurate because there
is no signal, and we may well find FA values of nan (not a number). We can
easily remove these in the following way.
"""
FA[np.isnan(FA)] = 0
"""
Saving the FA images is very easy using nibabel. We need the FA volume and the
affine matrix which transforms the image's coordinates to the world coordinates.
Here, we choose to save the FA in float32.
"""
fa_img = nib.Nifti1Image(FA.astype(np.float32), img.get_affine())
nib.save(fa_img, 'tensor_fa.nii.gz')
"""
You can now see the result with any nifti viewer or check it slice by slice
using matplotlib_'s imshow. In the same way you can save the eigen values, the
eigen vectors or any other properties of the Tensor.
"""
evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), img.get_affine())
nib.save(evecs_img, 'tensor_evecs.nii.gz')
"""
Other tensor statistics can be calculated from the `tenfit` object. For example,
a commonly calculated statistic is the mean diffusivity (MD). This is simply the
mean of the eigenvalues of the tensor. Since FA is a normalized
measure of variance and MD is the mean, they are often used as complimentary
measures. In `dipy`, there are two equivalent ways to calculate the mean
diffusivity. One is by calling the `mean_diffusivity` module function on the
eigen-values of the TensorFit class instance:
"""
MD1 = dti.mean_diffusivity(tenfit.evals)
nib.save(nib.Nifti1Image(MD1.astype(np.float32), img.get_affine()), 'tensors_md.nii.gz')
"""
The other is to call the TensorFit class method:
"""
MD2 = tenfit.md
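"""
A small sanity check (not part of the original example): the two routes to the
mean diffusivity should agree numerically.
"""

print('max |MD1 - MD2| = %g' % np.nanmax(np.abs(MD1 - MD2)))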
"""
Obviously, the quantities are identical.
We can also compute the colored FA or RGB-map [Pajevic1999]_. First, we make sure
that the FA is scaled between 0 and 1; then we compute the RGB map and save it.
"""
FA = np.clip(FA, 0, 1)
RGB = color_fa(FA, tenfit.evecs)
nib.save(nib.Nifti1Image(np.array(255 * RGB, 'uint8'), img.get_affine()), 'tensor_rgb.nii.gz')
"""
Let's try to visualize the tensor ellipsoids of a small rectangular
area in an axial slice of the splenium of the corpus callosum (CC).
"""
print('Computing tensor ellipsoids in a part of the splenium of the CC')
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
from dipy.viz import fvtk
ren = fvtk.ren()
evals = tenfit.evals[13:43, 44:74, 28:29]
evecs = tenfit.evecs[13:43, 44:74, 28:29]
"""
We can color the ellipsoids using the ``color_fa`` values that we calculated
above. In this example we additionally normalize the values to increase the
contrast.
"""
cfa = RGB[13:43, 44:74, 28:29]
cfa /= cfa.max()
fvtk.add(ren, fvtk.tensor(evals, evecs, cfa, sphere))
print('Saving illustration as tensor_ellipsoids.png')
fvtk.record(ren, n_frames=1, out_path='tensor_ellipsoids.png', size=(600, 600))
"""
.. figure:: tensor_ellipsoids.png
:align: center
**Tensor Ellipsoids**.
"""
fvtk.clear(ren)
"""
Finally, we can visualize the tensor orientation distribution functions
for the same area as we did with the ellipsoids.
"""
tensor_odfs = tenmodel.fit(data[20:50, 55:85, 38:39]).odf(sphere)
fvtk.add(ren, fvtk.sphere_funcs(tensor_odfs, sphere, colormap=None))
#fvtk.show(ren)
print('Saving illustration as tensor_odfs.png')
fvtk.record(ren, n_frames=1, out_path='tensor_odfs.png', size=(600, 600))
"""
.. figure:: tensor_odfs.png
:align: center
**Tensor ODFs**.
Note that while the tensor model is an accurate and reliable model of the
diffusion signal in the white matter, it has the drawback that it only has one
principal diffusion direction. Therefore, in locations in the brain that
contain multiple fiber populations crossing each other, the tensor model may
indicate that the principal diffusion direction is intermediate to these
directions. Therefore, using the principal diffusion direction for tracking in
these locations may be misleading and may lead to errors in defining the
tracks. Fortunately, other reconstruction methods can be used to represent the
diffusion and fiber orientations in those locations. These are presented in
other examples.
.. [Basser1994] Basser PJ, Mattielo J, LeBihan (1994). MR diffusion tensor
spectroscopy and imaging.
.. [Pajevic1999] Pajevic S, Pierpaoli (1999). Color schemes to represent
the orientation of anisotropic tissues from diffusion tensor
data: application to white matter fiber tract mapping in
the human brain.
.. include:: ../links_names.inc
"""
| bsd-3-clause |
CitizensCode/OSMSurfaceParking | populationVehicles.py | 1 | 1424 | from pandas import DataFrame, Series
import pandas as pd
import plotly.plotly as py
from plotly.graph_objs import *
# Locate the cansim data
adultPopulation = "./Data/NBAdultPopulationCansim.csv"
vehicleRegistrations = "./Data/NBVehicleRegistrationsCansim.csv"
# Read in the population data
pop_df = pd.read_csv(adultPopulation)
pop_df = pop_df.drop(['GEO', 'SEX', 'AGE'], axis=1).set_index('Ref_Date')
pop_df.columns = ['New Brunswick Population 18+']
pop_df
# Read in the registration data
reg_df = pd.read_csv(vehicleRegistrations)
reg_df = reg_df.drop(['REGION', 'TYPE'], axis=1).set_index('Ref_Date')
reg_df.columns = ['Vehicles Registered in NB < 4500kg']
reg_df
# Merge them both together
merged = pd.merge(reg_df, pop_df, left_index=True, right_index=True)
merged
# Calculate the number of vehicles per adult
vehic_per_adult = merged.ix[:,0] / merged.ix[:,1]
vehic_per_adult
# Makes it possible to plot a pandas DataFrame in plotly
def df_to_plotly(df):
'''
    Converting a Pandas DataFrame to the Plotly interface
'''
x = df.index.values
lines={}
for key in df:
lines[key]={}
lines[key]["x"]=x
lines[key]["y"]=df[key].values
lines[key]["name"]=key
#Appending all lines
lines_plotly=[lines[key] for key in df]
return lines_plotly
# Convert the data using the above function
plotly_data = df_to_plotly(merged)
# Plot it
py.plot(plotly_data)
| apache-2.0 |
marqh/iris | docs/iris/example_code/General/lineplot_with_legend.py | 18 | 1131 | """
Multi-line temperature profile plot
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
fname = iris.sample_data_path('air_temp.pp')
# Load exactly one cube from the given file.
temperature = iris.load_cube(fname)
# We only want a small number of latitudes, so filter some out
# using "extract".
temperature = temperature.extract(
iris.Constraint(latitude=lambda cell: 68 <= cell < 78))
for cube in temperature.slices('longitude'):
# Create a string label to identify this cube (i.e. latitude: value).
cube_label = 'latitude: %s' % cube.coord('latitude').points[0]
# Plot the cube, and associate it with a label.
qplt.plot(cube, label=cube_label)
# Add the legend with 2 columns.
plt.legend(ncol=2)
# Put a grid on the plot.
plt.grid(True)
# Tell matplotlib not to extend the plot axes range to nicely
# rounded numbers.
plt.axis('tight')
# Finally, show it.
iplt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
gkc1000/pyscf | pyscf/nao/prod_basis.py | 1 | 34330 | from __future__ import print_function, division
import numpy as np
from scipy.sparse import coo_matrix
from pyscf.nao.lsofcsr import lsofcsr_c
from numpy import array, einsum, zeros, int64, sqrt, int32
from ctypes import POINTER, c_double, c_int64, byref
from pyscf.nao.m_libnao import libnao
from timeit import default_timer as timer
libnao.init_vrtx_cc_apair.argtypes = (POINTER(c_double), POINTER(c_int64))
libnao.init_vrtx_cc_batch.argtypes = (POINTER(c_double), POINTER(c_int64))
libnao.vrtx_cc_apair.argtypes = (
POINTER(c_int64), # sp12(1:2) ! chemical species indices
POINTER(c_double), # rc12(1:3,1:2) ! positions of species
POINTER(c_int64), # lscc(ncc) ! list of contributing centers
POINTER(c_int64), # ncc ! number of contributing centers
POINTER(c_double), # dout(nout) ! vertex & converting coefficients
POINTER(c_int64)) # nout ! size of the buffer for vertex & converting coefficients
libnao.vrtx_cc_batch.argtypes = (
POINTER(c_int64), # npairs ! chemical species indices
POINTER(c_double), # p2srncc ! species indices, positions of species, number of cc, cc
POINTER(c_int64), # ncc ! leading dimension of p2srncc
POINTER(c_int64)) # p2ndp ! pair -> number of dominant products in this pair
libnao.get_vrtx_cc_batch.argtypes = (
POINTER(c_int64), # ps ! start pair
POINTER(c_int64), # pf ! finish pair
POINTER(c_double), # data ! output data buffer
POINTER(c_int64)) # ndat ! size of data buffer
#
#
#
class prod_basis():
'''
Holder of local and bilocal product functions and vertices.
Args:
    nao : system-variables object, i.e. the holder of the geometry and orbitals description
    tol_loc, tol_biloc : tolerances used to keep the linear combinations of products
  Returns:
    For each species, a set of radial functions defining a product basis.
    These functions are sufficient to represent the products of the original atomic orbitals
    via product vertex coefficients and conversion coefficients.
Examples:
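    A minimal sketch (assuming ``sv`` is an already constructed pyscf.nao
    system-variables object):
      pb  = prod_basis(nao=sv)
      cc  = pb.get_da2cc_den()          # conversion coefficients: dominant -> atom-centered products
      vdp = pb.get_dp_vertex_array()    # vertex in the dominant-product basis
      vac = pb.get_ac_vertex_array()    # vertex in the atom-centered product basis
      # the two vertices are related through the conversion coefficients,
      # i.e. vac should (approximately) equal numpy.einsum('dp,dab->pab', cc, vdp)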
'''
def __init__(self, nao=None, **kw):
""" Variable belonging to the class prod_basis_c:
From input:
self.sv: copy of sv (system variable), probably not necessary??
self.tol_loc: tolerance for local basis
self.tol_biloc: tolerance for bilocal basis
self.ac_rcut_ratio: ac rcut ratio??
self.ac_npc_max: maximal number of participating centers
Output:
self.prod_log: Holder of (local) product functions and vertices
self.hkernel_csr: hartree kernel: local part of Coulomb interaction
self.c2s: global product Center (atom) -> start in case of atom-centered basis
self.bp2info: some information including indices of atoms, list of contributing centres, conversion coefficients
self.dpc2s, self.dpc2t, self.dpc2sp: product Center -> list of the size of the basis set in this center,of center's types,of product species
"""
self.nao = nao
self.tol_loc = kw['tol_loc'] if 'tol_loc' in kw else 1e-5
self.tol_biloc = kw['tol_biloc'] if 'tol_biloc' in kw else 1e-6
self.tol_elim = kw['tol_elim'] if 'tol_elim' in kw else 1e-16
self.ac_rcut_ratio = kw['ac_rcut_ratio'] if 'ac_rcut_ratio' in kw else 1.0
self.ac_npc_max = kw['ac_npc_max'] if 'ac_npc_max' in kw else 8
self.pb_algorithm = kw['pb_algorithm'].lower() if 'pb_algorithm' in kw else 'pp'
self.jcutoff = kw['jcutoff'] if 'jcutoff' in kw else 14
self.metric_type = kw['metric_type'] if 'metric_type' in kw else 2
self.optimize_centers = kw['optimize_centers'] if 'optimize_centers' in kw else 0
self.ngl = kw['ngl'] if 'ngl' in kw else 96
if nao is None: return
if self.pb_algorithm=='pp':
self.init_prod_basis_pp_batch(nao, **kw)
elif self.pb_algorithm=='fp':
from pyscf.nao.m_pb_ae import pb_ae
pb_ae(self, nao, self.tol_loc, self.tol_biloc, self.ac_rcut_ratio)
else:
print( 'self.pb_algorithm', self.pb_algorithm )
raise RuntimeError('unknown pb_algorithm')
return
def init_prod_basis_pp(self, sv, **kvargs):
""" Talman's procedure should be working well with Pseudo-Potential starting point."""
from pyscf.nao.m_prod_biloc import prod_biloc_c
#t1 = timer()
self.init_inp_param_prod_log_dp(sv, **kvargs)
data = self.chain_data()
libnao.init_vrtx_cc_apair(data.ctypes.data_as(POINTER(c_double)), c_int64(len(data)))
self.sv_pbloc_data = True
#t2 = timer(); print(t2-t1); t1=timer();
self.bp2info = [] # going to be some information including indices of atoms, list of contributing centres, conversion coefficients
for ia1 in range(sv.natoms):
rc1 = sv.ao_log.sp2rcut[sv.atom2sp[ia1]]
for ia2 in range(ia1+1,sv.natoms):
rc2,dist = sv.ao_log.sp2rcut[sv.atom2sp[ia2]], sqrt(((sv.atom2coord[ia1]-sv.atom2coord[ia2])**2).sum())
if dist>rc1+rc2 : continue
pbiloc = self.comp_apair_pp_libint(ia1,ia2)
if pbiloc is not None : self.bp2info.append(pbiloc)
self.dpc2s,self.dpc2t,self.dpc2sp = self.init_c2s_domiprod() # dominant product's counting
self.npdp = self.dpc2s[-1]
self.norbs = self.sv.norbs
return self
def init_prod_basis_pp_batch(self, nao, **kw):
""" Talman's procedure should be working well with Pseudo-Potential starting point."""
from pyscf.nao.prod_log import prod_log
from pyscf.nao.m_prod_biloc import prod_biloc_c
sv = nao
t1 = timer()
self.norbs = sv.norbs
self.init_inp_param_prod_log_dp(sv, **kw)
#t2 = timer(); print(' after init_inp_param_prod_log_dp ', t2-t1); t1=timer()
data = self.chain_data()
libnao.init_vrtx_cc_batch(data.ctypes.data_as(POINTER(c_double)), c_int64(len(data)))
self.sv_pbloc_data = True
aos = sv.ao_log
p2srncc,p2npac,p2atoms = [],[],[]
for a1,[sp1,ra1] in enumerate(zip(sv.atom2sp, sv.atom2coord)):
rc1 = aos.sp2rcut[sp1]
for a2,[sp2,ra2] in enumerate(zip(sv.atom2sp[a1+1:], sv.atom2coord[a1+1:])):
a2+=a1+1
rc2,dist = aos.sp2rcut[sp2], sqrt(((ra1-ra2)**2).sum())
if dist>rc1+rc2 : continue
cc2atom = self.ls_contributing(a1,a2)
p2atoms.append([a1,a2])
p2srncc.append([sp1,sp2]+list(ra1)+list(ra2)+[len(cc2atom)]+list(cc2atom))
p2npac.append( sum([self.prod_log.sp2norbs[sv.atom2sp[ia]] for ia in cc2atom ]))
#print(np.asarray(p2srncc))
p2ndp = np.require( zeros(len(p2srncc), dtype=np.int64), requirements='CW')
p2srncc_cp = np.require( np.asarray(p2srncc), requirements='C')
npairs = p2srncc_cp.shape[0]
self.npairs = npairs
self.bp2info = [] # going to be indices of atoms, list of contributing centres, conversion coefficients
if npairs>0 : # Conditional fill of the self.bp2info if there are bilocal pairs (natoms>1)
ld = p2srncc_cp.shape[1]
if nao.verbosity>0:
#print(__name__,'npairs {} and p2srncc_cp.shape is {}'.format(npairs, p2srncc_cp.shape))
t2 = timer(); print(__name__,'\t====> Time for call vrtx_cc_batch: {:.2f} sec, npairs: {}'.format(t2-t1, npairs)); t1=timer()
libnao.vrtx_cc_batch( c_int64(npairs), p2srncc_cp.ctypes.data_as(POINTER(c_double)),
c_int64(ld), p2ndp.ctypes.data_as(POINTER(c_int64)))
if nao.verbosity>0:
print(__name__,'\t====> libnao.vrtx_cc_batch is done!')
t2 = timer(); print(__name__,'\t====> Time after vrtx_cc_batch:\t {:.2f} sec'.format(t2-t1)); t1=timer()
nout = 0
sp2norbs = sv.ao_log.sp2norbs
for srncc,ndp,npac in zip(p2srncc,p2ndp,p2npac):
sp1,sp2 = srncc[0],srncc[1]
nout = nout + ndp*sp2norbs[sp1]*sp2norbs[sp2]+npac*ndp
dout = np.require( zeros(nout), requirements='CW')
if nao.verbosity>3:print(__name__,'\t====>libnao.get_vrtx_cc_batch is calling')
libnao.get_vrtx_cc_batch(c_int64(0),c_int64(npairs),dout.ctypes.data_as(POINTER(c_double)),c_int64(nout))
f = 0
for srncc,ndp,npac,[a1,a2] in zip(p2srncc,p2ndp,p2npac,p2atoms):
if ndp<1 : continue
sp1,sp2,ncc = srncc[0],srncc[1],srncc[8]
icc2a = array(srncc[9:9+ncc], dtype=int64)
nnn = np.array((ndp,sp2norbs[sp2],sp2norbs[sp1]), dtype=int64)
nnc = np.array([ndp,npac], dtype=int64)
s = f; f=s+np.prod(nnn); vrtx = dout[s:f].reshape(nnn)
s = f; f=s+np.prod(nnc); ccoe = dout[s:f].reshape(nnc)
icc2s = zeros(len(icc2a)+1, dtype=int64)
for icc,a in enumerate(icc2a): icc2s[icc+1] = icc2s[icc] + self.prod_log.sp2norbs[sv.atom2sp[a]]
pbiloc = prod_biloc_c(atoms=array([a2,a1]),vrtx=vrtx,cc2a=icc2a,cc2s=icc2s,cc=ccoe)
self.bp2info.append(pbiloc)
#t2 = timer(); print('after loop ', t2-t1); t1=timer()
self.dpc2s,self.dpc2t,self.dpc2sp = self.init_c2s_domiprod() # dominant product's counting
self.npdp = self.dpc2s[-1]
#t2 = timer(); print('after init_c2s_domiprod ', t2-t1); t1=timer()
return self
def init_inp_param_prod_log_dp(self, sv, tol_loc=1e-5, tol_biloc=1e-6, ac_rcut_ratio=1.0, ac_npc_max=8, jcutoff=14, metric_type=2, optimize_centers=0, ngl=96, **kw):
""" Talman's procedure should be working well with a pseudo-potential hamiltonians.
This subroutine prepares the class for a later atom pair by atom pair generation
of the dominant product vertices and the conversion coefficients by calling
subroutines from the library libnao.
"""
from pyscf.nao.prod_log import prod_log
from pyscf.nao.m_libnao import libnao
self.sv = sv
self.tol_loc,self.tol_biloc,self.ac_rcut_ratio,self.ac_npc_max = tol_loc, tol_biloc, ac_rcut_ratio, ac_npc_max
self.jcutoff,self.metric_type,self.optimize_centers,self.ngl = jcutoff, metric_type, optimize_centers, ngl
self.ac_rcut = ac_rcut_ratio*max(sv.ao_log.sp2rcut)
lload = kw['load_from_hdf5'] if 'load_from_hdf5' in kw else False
if lload :
self.prod_log = prod_log(ao_log=sv.ao_log, sp2charge=sv.sp2charge, tol_loc=tol_loc, load=True) # tests Fortran input
else :
self.prod_log = prod_log(ao_log=sv.ao_log, tol_loc=tol_loc, **kw) # local basis (for each specie)
self.c2s = zeros((sv.natm+1), dtype=int64) # global product Center (atom) -> start in case of atom-centered basis
for gc,sp in enumerate(sv.atom2sp): self.c2s[gc+1]=self.c2s[gc]+self.prod_log.sp2norbs[sp] #
return self
def chain_data(self):
""" This subroutine creates a buffer of information to communicate the system variables and the local product vertex to libnao. Later, one will be able to generate the bilocal vertex and conversion coefficient for a given pair of atom species and their coordinates ."""
from numpy import concatenate as conc
aos,sv,pl = self.sv.ao_log, self.sv, self.prod_log
assert aos.nr==pl.nr
assert aos.nspecies==pl.nspecies
nr,nsp,nmt,nrt = aos.nr,aos.nspecies, sum(aos.sp2nmult),aos.nr*sum(aos.sp2nmult)
nat,na1,tna,nms = sv.natoms,sv.natoms+1,3*sv.natoms,sum(aos.sp2nmult)+aos.nspecies
nmtp,nrtp,nmsp = sum(pl.sp2nmult),pl.nr*sum(pl.sp2nmult),sum(pl.sp2nmult)+pl.nspecies
nvrt = sum(aos.sp2norbs*aos.sp2norbs*pl.sp2norbs)
ndat = 200 + 2*nr + 4*nsp + 2*nmt + nrt + nms + 3*3 + nat + 2*na1 + tna + \
4*nsp + 2*nmtp + nrtp + nmsp + nvrt
dat = zeros(ndat)
# Simple parameters
i = 0
dat[i] = -999.0; i+=1 # pointer to the empty space in simple parameter
dat[i] = aos.nspecies; i+=1
dat[i] = aos.nr; i+=1
dat[i] = aos.rmin; i+=1;
dat[i] = aos.rmax; i+=1;
dat[i] = aos.kmax; i+=1;
dat[i] = aos.jmx; i+=1;
dat[i] = conc(aos.psi_log).sum(); i+=1;
dat[i] = conc(pl.psi_log).sum(); i+=1;
dat[i] = sv.natoms; i+=1
dat[i] = sv.norbs; i+=1
dat[i] = sv.norbs_sc; i+=1
dat[i] = sv.nspin; i+=1
dat[i] = self.tol_loc; i+=1
dat[i] = self.tol_biloc; i+=1
dat[i] = self.ac_rcut_ratio; i+=1
dat[i] = self.ac_npc_max; i+=1
dat[i] = self.jcutoff; i+=1
dat[i] = self.metric_type; i+=1
dat[i] = self.optimize_centers; i+=1
dat[i] = self.ngl; i+=1
dat[0] = i
# Pointers to data
i = 99
s = 199
dat[i] = s+1; i+=1; f=s+nr; dat[s:f] = aos.rr; s=f; # pointer to rr
dat[i] = s+1; i+=1; f=s+nr; dat[s:f] = aos.pp; s=f; # pointer to pp
dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = aos.sp2nmult; s=f; # pointer to sp2nmult
dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = aos.sp2rcut; s=f; # pointer to sp2rcut
dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = aos.sp2norbs; s=f; # pointer to sp2norbs
dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = aos.sp2charge; s=f; # pointer to sp2charge
dat[i] = s+1; i+=1; f=s+nmt; dat[s:f] = conc(aos.sp_mu2j); s=f; # pointer to sp_mu2j
dat[i] = s+1; i+=1; f=s+nmt; dat[s:f] = conc(aos.sp_mu2rcut); s=f; # pointer to sp_mu2rcut
dat[i] = s+1; i+=1; f=s+nrt; dat[s:f] = conc(aos.psi_log).reshape(nrt); s=f; # pointer to psi_log
dat[i] = s+1; i+=1; f=s+nms; dat[s:f] = conc(aos.sp_mu2s); s=f; # pointer to sp_mu2s
dat[i] = s+1; i+=1; f=s+3*3; dat[s:f] = conc(sv.ucell); s=f; # pointer to ucell (123,xyz) ?
dat[i] = s+1; i+=1; f=s+nat; dat[s:f] = sv.atom2sp; s=f; # pointer to atom2sp
dat[i] = s+1; i+=1; f=s+na1; dat[s:f] = sv.atom2s; s=f; # pointer to atom2s
dat[i] = s+1; i+=1; f=s+na1; dat[s:f] = sv.atom2mu_s; s=f; # pointer to atom2mu_s
dat[i] = s+1; i+=1; f=s+tna; dat[s:f] = conc(sv.atom2coord); s=f; # pointer to atom2coord
dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = pl.sp2nmult; s=f; # sp2nmult of product basis
dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = pl.sp2rcut; s=f; # sp2nmult of product basis
dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = pl.sp2norbs; s=f; # sp2norbs of product basis
dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = pl.sp2charge; s=f; # sp2norbs of product basis
dat[i] = s+1; i+=1; f=s+nmtp; dat[s:f] = conc(pl.sp_mu2j); s=f; # pointer to sp_mu2j
dat[i] = s+1; i+=1; f=s+nmtp; dat[s:f] = conc(pl.sp_mu2rcut); s=f; # pointer to sp_mu2rcut
dat[i] = s+1; i+=1; f=s+nrtp; dat[s:f] = conc(pl.psi_log).reshape(nrtp); s=f; # pointer to psi_log
dat[i] = s+1; i+=1; f=s+nmsp; dat[s:f] = conc(pl.sp_mu2s); s=f; # pointer to sp_mu2s
dat[i] = s+1; i+=1; f=s+nvrt; dat[s:f] = conc([v.flatten() for v in pl.sp2vertex]); s=f; # pointer to sp2vertex
dat[i] = s+1; # this is a terminator to simplify operation
return dat
def comp_apair_pp_libint(self, a1,a2):
""" Get's the vertex coefficient and conversion coefficients for a pair of atoms given by their atom indices """
from operator import mul
from pyscf.nao.m_prod_biloc import prod_biloc_c
if not hasattr(self, 'sv_pbloc_data') : raise RuntimeError('.sv_pbloc_data is absent')
assert a1>=0
assert a2>=0
t1 = timer()
sv = self.sv
aos = self.sv.ao_log
sp12 = np.require( np.array([sv.atom2sp[a] for a in (a1,a2)], dtype=c_int64), requirements='C')
rc12 = np.require( np.array([sv.atom2coord[a,:] for a in (a1,a2)]), requirements='C')
icc2a = np.require( np.array(self.ls_contributing(a1,a2), dtype=c_int64), requirements='C')
npmx = aos.sp2norbs[sv.atom2sp[a1]]*aos.sp2norbs[sv.atom2sp[a2]]
npac = sum([self.prod_log.sp2norbs[sv.atom2sp[ia]] for ia in icc2a ])
nout = c_int64(npmx**2+npmx*npac+10)
dout = np.require( zeros(nout.value), requirements='CW')
libnao.vrtx_cc_apair( sp12.ctypes.data_as(POINTER(c_int64)), rc12.ctypes.data_as(POINTER(c_double)), icc2a.ctypes.data_as(POINTER(c_int64)), c_int64(len(icc2a)), dout.ctypes.data_as(POINTER(c_double)), nout )
if dout[0]<1: return None
nnn = np.array(dout[0:3], dtype=int)
nnc = np.array([dout[8],dout[7]], dtype=int)
ncc = int(dout[9])
if ncc!=len(icc2a): raise RuntimeError('ncc!=len(icc2a)')
s = 10; f=s+np.prod(nnn); vrtx = dout[s:f].reshape(nnn)
s = f; f=s+np.prod(nnc); ccoe = dout[s:f].reshape(nnc)
icc2s = zeros(len(icc2a)+1, dtype=np.int64)
for icc,a in enumerate(icc2a): icc2s[icc+1] = icc2s[icc] + self.prod_log.sp2norbs[sv.atom2sp[a]]
pbiloc = prod_biloc_c(atoms=array([a2,a1]),vrtx=vrtx,cc2a=icc2a,cc2s=icc2s,cc=ccoe)
return pbiloc
def ls_contributing(self, a1,a2):
""" Get the list of contributing centers """
from pyscf.nao.m_ls_contributing import ls_contributing
sp12 = np.array([self.sv.atom2sp[a] for a in (a1,a2)])
rc12 = np.array([self.sv.atom2coord[a,:] for a in (a1,a2)])
return ls_contributing(self, sp12, rc12)
def get_da2cc_den(self, dtype=np.float64):
""" Returns Conversion Coefficients as dense matrix """
nfdp,nfap = self.dpc2s[-1],self.c2s[-1]
da2cc = zeros((nfdp,nfap), dtype=dtype)
for sd,fd,pt in zip(self.dpc2s,self.dpc2s[1:],self.dpc2t):
if pt==1: da2cc[sd:fd,sd:fd] = np.identity(fd-sd)
for sd,fd,pt,spp in zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp):
if pt==1: continue
inf = self.bp2info[spp]
for c,ls,lf in zip(inf.cc2a, inf.cc2s, inf.cc2s[1:]):
da2cc[sd:fd, self.c2s[c]:self.c2s[c+1]] = inf.cc[:,ls:lf]
return da2cc
def get_da2cc_nnz(self):
""" Computes the number of non-zero matrix elements in the conversion matrix ac <=> dp """
nnz = 0
for sd,fd,pt in zip(self.dpc2s,self.dpc2s[1:],self.dpc2t):
if pt==1: nnz = nnz + (fd-sd)
for sd,fd,pt,spp in zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp):
if pt==1: continue
inf = self.bp2info[spp]
for c,ls,lf in zip(inf.cc2a, inf.cc2s, inf.cc2s[1:]): nnz = nnz + (fd-sd)*(lf-ls)
return nnz
# should we not keep only the sparse matrix and get rid of the original data ??
def get_da2cc_sparse(self, dtype=np.float64, sparseformat=coo_matrix):
""" Returns Conversion Coefficients as sparse COO matrix """
nfdp,nfap = self.dpc2s[-1],self.c2s[-1]
nnz = self.get_da2cc_nnz()
irow,icol,data = zeros(nnz, dtype=int),zeros(nnz, dtype=int), zeros(nnz, dtype=dtype) # Start to construct coo matrix
inz = 0
for atom, [sd,fd,pt] in enumerate(zip(self.dpc2s,self.dpc2s[1:],self.dpc2t)):
if pt!=1: continue
for d in range(sd,fd):
irow[inz],icol[inz],data[inz] = d,d,1.0
inz+=1
for atom, [sd,fd,pt,spp] in enumerate(zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp)):
if pt==1: continue
inf = self.bp2info[spp]
for c,ls,lf in zip(inf.cc2a, inf.cc2s, inf.cc2s[1:]):
for d in range(sd,fd):
for a in range(self.c2s[c],self.c2s[c+1]):
irow[inz],icol[inz],data[inz] = d,a,inf.cc[d-sd,a-self.c2s[c]+ls]
inz+=1
return sparseformat((data,(irow,icol)), dtype=dtype, shape=(nfdp, nfap))
def get_ac_vertex_array(self, dtype=np.float64):
""" Returns the product vertex coefficients as 3d array (dense table) """
atom2so = self.sv.atom2s
nfap = self.c2s[-1]
n = self.sv.atom2s[-1]
pab2v = np.require( zeros((nfap,n,n), dtype=dtype), requirements='CW')
for atom,[sd,fd,pt,spp] in enumerate(zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp)):
if pt!=1: continue
s,f = atom2so[atom:atom+2]
pab2v[sd:fd,s:f,s:f] = self.prod_log.sp2vertex[spp]
for sd,fd,pt,spp in zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp):
if pt!=2: continue
inf= self.bp2info[spp]
lab = einsum('dl,dab->lab', inf.cc, inf.vrtx)
a,b = inf.atoms
sa,fa,sb,fb = atom2so[a],atom2so[a+1],atom2so[b],atom2so[b+1]
for c,ls,lf in zip(inf.cc2a, inf.cc2s, inf.cc2s[1:]):
pab2v[self.c2s[c]:self.c2s[c+1],sa:fa,sb:fb] = lab[ls:lf,:,:]
pab2v[self.c2s[c]:self.c2s[c+1],sb:fb,sa:fa] = einsum('pab->pba', lab[ls:lf,:,:])
return pab2v
def get_dp_vertex_array(self, dtype=np.float64):
""" Returns the product vertex coefficients as 3d array for dominant products """
atom2so = self.sv.atom2s
nfdp = self.dpc2s[-1]
n = self.sv.atom2s[-1]
pab2v = np.require(zeros((nfdp,n,n), dtype=dtype), requirements='CW')
for atom,[sd,fd,pt,spp] in enumerate(zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp)):
if pt!=1: continue
s,f = atom2so[atom:atom+2]
pab2v[sd:fd,s:f,s:f] = self.prod_log.sp2vertex[spp]
for sd,fd,pt,spp in zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp):
if pt!=2: continue
inf= self.bp2info[spp]
a,b = inf.atoms
sa,fa,sb,fb = atom2so[a],atom2so[a+1],atom2so[b],atom2so[b+1]
pab2v[sd:fd,sa:fa,sb:fb] = inf.vrtx
pab2v[sd:fd,sb:fb,sa:fa] = einsum('pab->pba', inf.vrtx)
return pab2v
def get_dp_vertex_nnz(self):
""" Number of non-zero elements in the dominant product vertex : can be speedup, but..."""
nnz = 0
for pt,spp in zip(self.dpc2t,self.dpc2sp):
if pt==1: nnz += self.prod_log.sp2vertex[spp].size
elif pt==2: nnz += 2*self.bp2info[spp].vrtx.size
else:
raise RuntimeError('pt?')
return nnz
def get_dp_vertex_doubly_sparse(self, dtype=np.float64, sparseformat=lsofcsr_c, axis=0):
""" Returns the product vertex coefficients for dominant products as a one-dimensional array of sparse matrices """
nnz = self.get_dp_vertex_nnz()
i1,i2,i3,data = zeros(nnz, dtype=int), zeros(nnz, dtype=int), zeros(nnz, dtype=int), zeros(nnz, dtype=dtype)
atom2s,dpc2s,nfdp,n = self.sv.atom2s, self.dpc2s,self.dpc2s[-1],self.sv.atom2s[-1]
inz = 0
for s,f,sd,fd,pt,spp in zip(atom2s,atom2s[1:],dpc2s,dpc2s[1:],self.dpc2t,self.dpc2sp):
size = self.prod_log.sp2vertex[spp].size
lv = self.prod_log.sp2vertex[spp].reshape(size)
dd,aa,bb = np.mgrid[sd:fd,s:f,s:f].reshape((3,size))
i1[inz:inz+size],i2[inz:inz+size],i3[inz:inz+size],data[inz:inz+size] = dd,aa,bb,lv
inz+=size
for sd,fd,pt,spp in zip(dpc2s,dpc2s[1:],self.dpc2t,self.dpc2sp):
if pt!=2: continue
inf,(a,b),size = self.bp2info[spp],self.bp2info[spp].atoms,self.bp2info[spp].vrtx.size
sa,fa,sb,fb = atom2s[a],atom2s[a+1],atom2s[b],atom2s[b+1]
dd,aa,bb = np.mgrid[sd:fd,sa:fa,sb:fb].reshape((3,size))
i1[inz:inz+size],i2[inz:inz+size],i3[inz:inz+size] = dd,aa,bb
data[inz:inz+size] = inf.vrtx.reshape(size)
inz+=size
i1[inz:inz+size],i2[inz:inz+size],i3[inz:inz+size] = dd,bb,aa
data[inz:inz+size] = inf.vrtx.reshape(size)
inz+=size
return sparseformat((data, (i1, i2, i3)), dtype=dtype, shape=(nfdp,n,n), axis=axis)
def get_dp_vertex_doubly_sparse_loops(self, dtype=np.float64, sparseformat=lsofcsr_c, axis=0):
""" Returns the product vertex coefficients for dominant products as a one-dimensional array of sparse matrices, slow version """
nnz = self.get_dp_vertex_nnz()
i1,i2,i3,data = zeros(nnz, dtype=int), zeros(nnz, dtype=int), zeros(nnz, dtype=int), zeros(nnz, dtype=dtype)
a2s,n,nfdp,lv = self.sv.atom2s,self.sv.atom2s[-1],self.dpc2s[-1],self.prod_log.sp2vertex # local "aliases"
inz = 0
for atom,[sd,fd,pt,spp] in enumerate(zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp)):
if pt!=1: continue
s,f = a2s[atom:atom+2]
for p in range(sd,fd):
for a in range(s,f):
for b in range(s,f):
i1[inz],i2[inz],i3[inz],data[inz] = p,a,b,lv[spp][p-sd,a-s,b-s]
inz+=1
for atom, [sd,fd,pt,spp] in enumerate(zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp)):
if pt!=2: continue
inf= self.bp2info[spp]
a,b = inf.atoms
sa,fa,sb,fb = a2s[a],a2s[a+1],a2s[b],a2s[b+1]
for p in range(sd,fd):
for a in range(sa,fa):
for b in range(sb,fb):
i1[inz],i2[inz],i3[inz],data[inz] = p,a,b,inf.vrtx[p-sd,a-sa,b-sb]; inz+=1;
i1[inz],i2[inz],i3[inz],data[inz] = p,b,a,inf.vrtx[p-sd,a-sa,b-sb]; inz+=1;
return sparseformat((data, (i1, i2, i3)), dtype=dtype, shape=(nfdp,n,n), axis=axis)
def get_dp_vertex_sparse(self, dtype=np.float64, sparseformat=coo_matrix):
""" Returns the product vertex coefficients as 3d array for dominant products, in a sparse format coo(p,ab) by default"""
nnz = self.get_dp_vertex_nnz() #Number of non-zero elements in the dominant product vertex
irow,icol,data = zeros(nnz, dtype=int), zeros(nnz, dtype=int), zeros(nnz, dtype=dtype) # Start to construct coo matrix
atom2s,dpc2s,nfdp,n = self.sv.atom2s, self.dpc2s,self.dpc2s[-1],self.sv.atom2s[-1] #self.dpc2s, self.dpc2t, self.dpc2sp: product Center -> list of the size of the basis set in this center,of center's types,of product species
inz = 0
for s,f,sd,fd,pt,spp in zip(atom2s,atom2s[1:],dpc2s,dpc2s[1:],self.dpc2t,self.dpc2sp):
size = self.prod_log.sp2vertex[spp].size
lv = self.prod_log.sp2vertex[spp].reshape(size)
dd,aa,bb = np.mgrid[sd:fd,s:f,s:f].reshape((3,size))
irow[inz:inz+size],icol[inz:inz+size],data[inz:inz+size] = dd, aa+bb*n, lv
inz+=size
for sd,fd,pt,spp in zip(dpc2s,dpc2s[1:],self.dpc2t,self.dpc2sp):
if pt!=2: continue
inf,(a,b),size = self.bp2info[spp],self.bp2info[spp].atoms,self.bp2info[spp].vrtx.size
sa,fa,sb,fb = atom2s[a],atom2s[a+1],atom2s[b],atom2s[b+1]
dd,aa,bb = np.mgrid[sd:fd,sa:fa,sb:fb].reshape((3,size))
irow[inz:inz+size],icol[inz:inz+size] = dd,aa+bb*n
data[inz:inz+size] = inf.vrtx.reshape(size)
inz+=size
irow[inz:inz+size],icol[inz:inz+size] = dd,bb+aa*n
data[inz:inz+size] = inf.vrtx.reshape(size)
inz+=size
return sparseformat((data, (irow, icol)), dtype=dtype, shape=(nfdp,n*n))
def get_dp_vertex_sparse2(self, dtype=np.float64, sparseformat=coo_matrix):
""" Returns the product vertex coefficients as 3d array for dominant products, in a sparse format coo(pa,b) by default"""
import numpy as np
from scipy.sparse import csr_matrix, coo_matrix
nnz = self.get_dp_vertex_nnz()
irow,icol,data = np.zeros(nnz, dtype=int), np.zeros(nnz, dtype=int), np.zeros(nnz, dtype=dtype)
atom2s,dpc2s,nfdp,n = self.sv.atom2s, self.dpc2s, self.dpc2s[-1],self.sv.atom2s[-1]
inz = 0
for s,f,sd,fd,pt,spp in zip(atom2s,atom2s[1:],dpc2s,dpc2s[1:],self.dpc2t,self.dpc2sp):
size = self.prod_log.sp2vertex[spp].size
lv = self.prod_log.sp2vertex[spp].reshape(size)
dd,aa,bb = np.mgrid[sd:fd,s:f,s:f].reshape((3,size))
irow[inz:inz+size],icol[inz:inz+size],data[inz:inz+size] = dd+aa*nfdp, bb, lv
inz+=size
for sd,fd,pt,spp in zip(dpc2s,dpc2s[1:],self.dpc2t, self.dpc2sp):
if pt!=2: continue
inf,(a,b),size = self.bp2info[spp],self.bp2info[spp].atoms,self.bp2info[spp].vrtx.size
sa,fa,sb,fb = atom2s[a],atom2s[a+1],atom2s[b],atom2s[b+1]
dd,aa,bb = np.mgrid[sd:fd,sa:fa,sb:fb].reshape((3,size))
irow[inz:inz+size],icol[inz:inz+size] = dd+aa*nfdp, bb
data[inz:inz+size] = inf.vrtx.reshape(size)
inz+=size
irow[inz:inz+size],icol[inz:inz+size] = dd+bb*nfdp, aa
data[inz:inz+size] = inf.vrtx.reshape(size)
inz+=size
return coo_matrix((data, (irow, icol)), dtype=dtype, shape=(nfdp*n,n))
def get_dp_vertex_sparse_loops(self, dtype=np.float64, sparseformat=coo_matrix):
""" Returns the product vertex coefficients as 3d array for dominant products, in a sparse format coo(p,ab) slow version"""
nnz = self.get_dp_vertex_nnz()
irow,icol,data = zeros(nnz, dtype=int), zeros(nnz, dtype=int), zeros(nnz, dtype=dtype) # Start to construct coo matrix
atom2so = self.sv.atom2s
nfdp = self.dpc2s[-1]
n = self.sv.atom2s[-1]
inz = 0
for atom,[sd,fd,pt,spp] in enumerate(zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp)):
if pt!=1: continue
s,f = atom2so[atom:atom+2]
for p in range(sd,fd):
for a in range(s,f):
for b in range(s,f):
irow[inz],icol[inz],data[inz] = p,a+b*n,self.prod_log.sp2vertex[spp][p-sd,a-s,b-s]
inz+=1
for atom, [sd,fd,pt,spp] in enumerate(zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp)):
if pt!=2: continue
inf= self.bp2info[spp]
a,b = inf.atoms
sa,fa,sb,fb = atom2so[a],atom2so[a+1],atom2so[b],atom2so[b+1]
for p in range(sd,fd):
for a in range(sa,fa):
for b in range(sb,fb):
irow[inz],icol[inz],data[inz] = p,a+b*n,inf.vrtx[p-sd,a-sa,b-sb]; inz+=1;
irow[inz],icol[inz],data[inz] = p,b+a*n,inf.vrtx[p-sd,a-sa,b-sb]; inz+=1;
return sparseformat((data, (irow, icol)), dtype=dtype, shape=(nfdp,n*n))
def comp_fci_den(self, hk, dtype=np.float64):
""" Compute the four-center integrals and return it in a dense storage """
pab2v = self.get_ac_vertex_array(dtype=dtype)
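    # Two-step contraction of the product vertex with the kernel:
    #   abcd = sum_{p,q} V_{p,ab} * hk_{p,q} * V_{q,cd}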
pcd = np.einsum('pq,qcd->pcd', hk, pab2v)
abcd = np.einsum('pab,pcd->abcd', pab2v, pcd)
return abcd
def init_c2s_domiprod(self):
"""Compute the array of start indices for dominant product basis set """
c2n,c2t,c2sp = [],[],[] # product Center -> list of the size of the basis set in this center,of center's types,of product species
for atom,sp in enumerate(self.sv.atom2sp):
c2n.append(self.prod_log.sp2vertex[sp].shape[0]); c2t.append(1); c2sp.append(sp);
for ibp,inf in enumerate(self.bp2info):
c2n.append(inf.vrtx.shape[0]); c2t.append(2); c2sp.append(ibp);
ndpc = len(c2n) # number of product centers in this vertex
c2s = zeros(ndpc+1, np.int64 ) # product Center -> Start index of a product function in a global counting for this vertex
for c in range(ndpc): c2s[c+1] = c2s[c] + c2n[c]
return c2s,c2t,c2sp
def comp_moments(self, dtype=np.float64):
""" Computes the scalar and dipole moments for the all functions in the product basis """
sp2mom0, sp2mom1 = self.prod_log.comp_moments()
n = self.c2s[-1]
mom0 = np.require(zeros(n, dtype=dtype), requirements='CW')
mom1 = np.require(zeros((n,3), dtype=dtype), requirements='CW')
for a,[sp,coord,s,f] in enumerate(zip(self.sv.atom2sp,self.sv.atom2coord,self.c2s,self.c2s[1:])):
mom0[s:f],mom1[s:f,:] = sp2mom0[sp], einsum('j,k->jk', sp2mom0[sp],coord)+sp2mom1[sp]
return mom0,mom1
def comp_coulomb_pack(self, **kw):
""" Computes the packed version of the Hartree kernel """
from pyscf.nao.m_comp_coulomb_pack import comp_coulomb_pack
return comp_coulomb_pack(self.sv, self.prod_log, **kw)
def comp_coulomb_den(self, **kw):
""" Computes the dense (square) version of the Hartree kernel """
from pyscf.nao.m_comp_coulomb_den import comp_coulomb_den
return comp_coulomb_den(self.sv, self.prod_log, **kw)
def comp_fxc_lil(self, **kw):
""" Computes the sparse version of the xc kernel """
from pyscf.nao.m_vxc_lil import vxc_lil
return vxc_lil(self.sv, deriv=2, ao_log=self.prod_log, **kw)
def comp_fxc_pack(self, **kw):
""" Computes the packed version of the xc kernel """
from pyscf.nao.m_vxc_pack import vxc_pack
return vxc_pack(self.sv, deriv=2, ao_log=self.prod_log, **kw)
def overlap_check(self, **kw):
""" Our standard minimal check comparing with overlaps """
sref = self.sv.overlap_coo(**kw).toarray()
mom0,mom1 = self.comp_moments()
vpab = self.get_ac_vertex_array()
sprd = np.einsum('p,pab->ab', mom0,vpab)
return [[abs(sref-sprd).sum()/sref.size, np.amax(abs(sref-sprd))]]
def dipole_check(self, **kw):
""" Our standard minimal check """
dipcoo = self.sv.dipole_coo(**kw)
mom0,mom1 = self.comp_moments()
vpab = self.get_ac_vertex_array()
xyz2err = []
for i,dref in enumerate(dipcoo):
dref = dref.toarray()
dprd = np.einsum('p,pab->ab', mom1[:,i],vpab)
xyz2err.append([abs(dprd-dref).sum()/dref.size, np.amax(abs(dref-dprd))])
return xyz2err
def get_nprod(self):
""" Number of (atom-centered) products """
return self.c2s[-1]
def generate_png_spy_dp_vertex(self):
"""Produces pictures of the dominant product vertex in a common black-and-white way"""
import matplotlib.pyplot as plt
plt.ioff()
dab2v = self.get_dp_vertex_doubly_sparse()
for i,ab2v in enumerate(dab2v):
plt.spy(ab2v.toarray())
fname = "spy-v-{:06d}.png".format(i)
print(fname)
plt.savefig(fname, bbox_inches='tight')
plt.close()
return 0
def generate_png_chess_dp_vertex(self):
"""Produces pictures of the dominant product vertex a chessboard convention"""
import matplotlib.pylab as plt
plt.ioff()
dab2v = self.get_dp_vertex_doubly_sparse()
for i, ab in enumerate(dab2v):
fname = "chess-v-{:06d}.png".format(i)
print('Matrix No.#{}, Size: {}, Type: {}'.format(i+1, ab.shape, type(ab)), fname)
      if not isinstance(ab, np.ndarray): ab = ab.toarray()
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_aspect('equal')
plt.imshow(ab, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar()
plt.savefig(fname)
plt.close(fig)
def reshape_COO(a, shape):
"""Reshape the sparse matrix (a) and returns a coo_matrix with favourable shape."""
from scipy.sparse import coo_matrix
if not hasattr(shape, '__len__') or len(shape) != 2:
raise ValueError('`shape` must be a sequence of two integers')
c = a.tocoo()
nrows, ncols = c.shape
size = nrows * ncols
new_size = shape[0] * shape[1]
if new_size != size:
raise ValueError('total size of new array must be unchanged')
flat_indices = ncols * c.row + c.col
new_row, new_col = divmod(flat_indices, shape[1])
b = coo_matrix((c.data, (new_row, new_col)), shape=shape)
return b
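# A small illustrative check of reshape_COO (doctest-style, with made-up values,
# not part of the pyscf test suite): the flattened ordering of the non-zero
# entries is preserved, only the (row, column) decomposition changes.
#   >>> from scipy.sparse import coo_matrix
#   >>> a = coo_matrix(([1., 2.], ([0, 1], [1, 2])), shape=(2, 3))
#   >>> b = reshape_COO(a, (3, 2))
#   >>> b.shape
#   (3, 2)
#   >>> float(b.toarray()[0, 1]), float(b.toarray()[2, 1])
#   (1.0, 2.0)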
#
#
#
if __name__=='__main__':
from pyscf.nao import prod_basis_c, nao
from pyscf.nao.m_overlap_coo import overlap_coo
from pyscf import gto
import numpy as np
mol = gto.M(atom='O 0 0 0; H 0 0 0.5; H 0 0.5 0', basis='ccpvdz') # coordinates in Angstrom!
sv = nao(gto=mol)
print(sv.atom2s)
s_ref = overlap_coo(sv).todense()
pb = prod_basis_c()
pb.init_prod_basis_pp_batch(sv)
mom0,mom1=pb.comp_moments()
pab2v = pb.get_ac_vertex_array()
s_chk = einsum('pab,p->ab', pab2v,mom0)
print(abs(s_chk-s_ref).sum()/s_chk.size, abs(s_chk-s_ref).max())
| apache-2.0 |
glmcdona/FoosAI | Code/RawDataProcessing/raw_data_processing.py | 1 | 1416 | # Dependencies:
# !pip install numpy
# !pip install imageio
# !pip install matplotlib
import sys
from experiment import *
# Settings
#data_path = ".\\..\\Recorder\\FeatureSetBuilder\\Experiments\\experiment4.config"
if len(sys.argv) != 3:
print("ERROR: You need to pass in two arguments.")
print("raw_data_processing.py play <path to recording.config of recording to view>")
print("Eg: raw_data_processing.py play .\\..\\..\\TrainingData\\Raw\\Am1\\recording.config\n")
print("raw_data_processing.py process <path to experiment.config>")
print("Eg: raw_data_processing.py process .\\..\\..\\TrainingData\\Processed\\AmateurDefender\\experiment.config")
elif( sys.argv[1] == "play" ):
# Play the video live
print("Playing recording config frames from path %s." % (sys.argv[2]))
rec = Recording(sys.argv[2])
rec.play()
elif( sys.argv[1] == "playexp" ):
# Play the video live
print("Playing experiment config frames from path %s." % (sys.argv[2]))
exp = Experiment(sys.argv[2])
exp.play()
elif( sys.argv[1] == "process" ):
print("Processing experimient config frames from path %s." % (sys.argv[2]))
exp = Experiment(sys.argv[2])
exp.process()
elif( sys.argv[1] == "play_rod" ):
# Play the video live
print("Playing recording config frames from path %s." % (sys.argv[2]))
rec = Recording(sys.argv[2])
rec.play()
else:
print("ERROR: Invalid command %s. Must be play or process." % sys.argv[1])
| mit |
edhuckle/statsmodels | statsmodels/sandbox/distributions/mv_measures.py | 33 | 6257 | '''using multivariate dependence and divergence measures
The standard correlation coefficient measures only linear dependence between
random variables.
Kendall's tau measures any monotonic relationship, including non-linear ones.
mutual information measures any kind of dependence, but does not distinguish
between positive and negative relationships.
mutualinfo_kde and mutualinfo_binning follow Khan et al. 2007
Shiraj Khan, Sharba Bandyopadhyay, Auroop R. Ganguly, Sunil Saigal,
David J. Erickson, III, Vladimir Protopopescu, and George Ostrouchov,
Relative performance of mutual information estimation methods for
quantifying the dependence among short and noisy data,
Phys. Rev. E 76, 026209 (2007)
http://pre.aps.org/abstract/PRE/v76/i2/e026209
'''
import numpy as np
from scipy import stats
from scipy.stats import gaussian_kde
import statsmodels.sandbox.infotheo as infotheo
def mutualinfo_kde(y, x, normed=True):
'''mutual information of two random variables estimated with kde
'''
nobs = len(x)
if not len(y) == nobs:
raise ValueError('both data arrays need to have the same size')
x = np.asarray(x, float)
y = np.asarray(y, float)
yx = np.vstack((y,x))
kde_x = gaussian_kde(x)(x)
kde_y = gaussian_kde(y)(y)
kde_yx = gaussian_kde(yx)(yx)
mi_obs = np.log(kde_yx) - np.log(kde_x) - np.log(kde_y)
mi = mi_obs.sum() / nobs
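    # The normalization below maps mi into [0, 1); for a bivariate normal it
    # reduces to the absolute correlation coefficient, since there
    # mi = -0.5 * log(1 - rho**2).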
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed
else:
return mi
def mutualinfo_kde_2sample(y, x, normed=True):
    '''divergence between the distributions of two samples estimated with kde
    (the sample mean of log kde_x(x) - log kde_y(x), a Kullback-Leibler type
    measure of how the distribution of x differs from that of y)
    '''
nobs = len(x)
x = np.asarray(x, float)
y = np.asarray(y, float)
#yx = np.vstack((y,x))
kde_x = gaussian_kde(x.T)(x.T)
kde_y = gaussian_kde(y.T)(x.T)
#kde_yx = gaussian_kde(yx)(yx)
mi_obs = np.log(kde_x) - np.log(kde_y)
if len(mi_obs) != nobs:
raise ValueError("Wrong number of observations")
mi = mi_obs.mean()
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed
else:
return mi
def mutualinfo_binned(y, x, bins, normed=True):
    '''mutual information of two random variables estimated from binned data (2d histogram)
Notes
-----
bins='auto' selects the number of bins so that approximately 5 observations
are expected to be in each bin under the assumption of independence. This
    follows roughly the description in Khan et al. 2007
'''
nobs = len(x)
if not len(y) == nobs:
raise ValueError('both data arrays need to have the same size')
x = np.asarray(x, float)
y = np.asarray(y, float)
#yx = np.vstack((y,x))
## fyx, binsy, binsx = np.histogram2d(y, x, bins=bins)
## fx, binsx_ = np.histogram(x, bins=binsx)
## fy, binsy_ = np.histogram(y, bins=binsy)
if bins == 'auto':
ys = np.sort(y)
xs = np.sort(x)
#quantiles = np.array([0,0.25, 0.4, 0.6, 0.75, 1])
qbin_sqr = np.sqrt(5./nobs)
        quantiles = np.linspace(0, 1, int(1. / qbin_sqr))
quantile_index = ((nobs-1)*quantiles).astype(int)
#move edges so that they don't coincide with an observation
shift = 1e-6 + np.ones(quantiles.shape)
shift[0] -= 2*1e-6
binsy = ys[quantile_index] + shift
binsx = xs[quantile_index] + shift
elif np.size(bins) == 1:
binsy = bins
binsx = bins
elif (len(bins) == 2):
binsy, binsx = bins
## if np.size(bins[0]) == 1:
## binsx = bins[0]
## if np.size(bins[1]) == 1:
## binsx = bins[1]
fx, binsx = np.histogram(x, bins=binsx)
fy, binsy = np.histogram(y, bins=binsy)
fyx, binsy, binsx = np.histogram2d(y, x, bins=(binsy, binsx))
pyx = fyx * 1. / nobs
px = fx * 1. / nobs
py = fy * 1. / nobs
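    # Per-cell contributions p(y,x) * log(p(y,x) / (p(y) * p(x))); the 1e-10
    # offset only guards the logarithm against empty cells.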
mi_obs = pyx * (np.log(pyx+1e-10) - np.log(py)[:,None] - np.log(px))
mi = mi_obs.sum()
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed, (pyx, py, px, binsy, binsx), mi_obs
else:
return mi
if __name__ == '__main__':
import statsmodels.api as sm
funtype = ['linear', 'quadratic'][1]
nobs = 200
sig = 2#5.
#x = np.linspace(-3, 3, nobs) + np.random.randn(nobs)
x = np.sort(3*np.random.randn(nobs))
exog = sm.add_constant(x, prepend=True)
#y = 0 + np.log(1+x**2) + sig * np.random.randn(nobs)
if funtype == 'quadratic':
y = 0 + x**2 + sig * np.random.randn(nobs)
if funtype == 'linear':
y = 0 + x + sig * np.random.randn(nobs)
print('correlation')
print(np.corrcoef(y,x)[0, 1])
print('pearsonr', stats.pearsonr(y,x))
print('spearmanr', stats.spearmanr(y,x))
print('kendalltau', stats.kendalltau(y,x))
pxy, binsx, binsy = np.histogram2d(x,y, bins=5)
px, binsx_ = np.histogram(x, bins=binsx)
py, binsy_ = np.histogram(y, bins=binsy)
print('mutualinfo', infotheo.mutualinfo(px*1./nobs, py*1./nobs,
1e-15+pxy*1./nobs, logbase=np.e))
print('mutualinfo_kde normed', mutualinfo_kde(y,x))
print('mutualinfo_kde ', mutualinfo_kde(y,x, normed=False))
mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
mutualinfo_binned(y, x, 5, normed=True)
print('mutualinfo_binned normed', mi_normed)
print('mutualinfo_binned ', mi_obs.sum())
mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
mutualinfo_binned(y, x, 'auto', normed=True)
print('auto')
print('mutualinfo_binned normed', mi_normed)
print('mutualinfo_binned ', mi_obs.sum())
ys = np.sort(y)
xs = np.sort(x)
by = ys[((nobs-1)*np.array([0, 0.25, 0.4, 0.6, 0.75, 1])).astype(int)]
bx = xs[((nobs-1)*np.array([0, 0.25, 0.4, 0.6, 0.75, 1])).astype(int)]
mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
mutualinfo_binned(y, x, (by,bx), normed=True)
print('quantiles')
print('mutualinfo_binned normed', mi_normed)
print('mutualinfo_binned ', mi_obs.sum())
doplot = 1#False
if doplot:
import matplotlib.pyplot as plt
plt.plot(x, y, 'o')
olsres = sm.OLS(y, exog).fit()
plt.plot(x, olsres.fittedvalues)
| bsd-3-clause |
waynenilsen/statsmodels | statsmodels/datasets/elnino/data.py | 25 | 1779 | """El Nino dataset, 1950 - 2010"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This data is in the public domain."""
TITLE = """El Nino - Sea Surface Temperatures"""
SOURCE = """
National Oceanic and Atmospheric Administration's National Weather Service
ERSST.V3B dataset, Nino 1+2
http://www.cpc.ncep.noaa.gov/data/indices/
"""
DESCRSHORT = """Averaged monthly sea surface temperature - Pacific Ocean."""
DESCRLONG = """This data contains the averaged monthly sea surface
temperature in degrees Celsius of the Pacific Ocean, between 0-10 degrees South
and 90-80 degrees West, from 1950 to 2010. This dataset was obtained from
NOAA.
"""
NOTE = """::
Number of Observations - 61 x 12
Number of Variables - 1
Variable name definitions::
    TEMPERATURE - average sea surface temperature in degrees Celsius
(12 columns, one per month).
"""
from numpy import recfromtxt, column_stack, array
from pandas import DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the El Nino data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The elnino Dataset instance does not contain endog and exog attributes.
"""
data = _get_data()
names = data.dtype.names
dataset = Dataset(data=data, names=names)
return dataset
def load_pandas():
dataset = load()
dataset.data = DataFrame(dataset.data)
return dataset
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/elnino.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
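# Minimal usage sketch (illustrative only; it assumes elnino.csv sits next to
# this module, which is what _get_data expects):
if __name__ == '__main__':
    monthly = load_pandas().data
    print(monthly.head())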
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/en/model_selection/plot_precision_recall.py | 13 | 10108 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
Precision-Recall is a useful measure of success of prediction when the
classes are very imbalanced. In information retrieval, precision is a
measure of result relevancy, while recall is a measure of how many truly
relevant results are returned.
The precision-recall curve shows the tradeoff between precision and
recall for different threshold. A high area under the curve represents
both high recall and high precision, where high precision relates to a
low false positive rate, and high recall relates to a low false negative
rate. High scores for both show that the classifier is returning accurate
results (high precision), as well as returning a majority of all positive
results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
Note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall.
**Average precision** summarizes such a plot as the weighted mean of precisions
achieved at each threshold, with the increase in recall from the previous
threshold used as the weight:
:math:`\\text{AP} = \\sum_n (R_n - R_{n-1}) P_n`
where :math:`P_n` and :math:`R_n` are the precision and recall at the
nth threshold. A pair :math:`(R_k, P_k)` is referred to as an
*operating point*.
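As a purely illustrative example (numbers chosen here for exposition, not
produced by the code below): with operating points
:math:`(R, P) = (0.0, 1.0), (0.5, 0.8), (1.0, 0.6)`, the summation gives
:math:`\\text{AP} = (0.5 - 0.0)\\times 0.8 + (1.0 - 0.5)\\times 0.6 = 0.7`.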
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend the precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
from __future__ import print_function
###############################################################################
# In binary classification settings
# --------------------------------------------------------
#
# Create simple data
# ..................
#
# Try to differentiate the two first classes of the iris data
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
import numpy as np
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Limit to the two first classes, and split into training and test
X_train, X_test, y_train, y_test = train_test_split(X[y < 2], y[y < 2],
test_size=.5,
random_state=random_state)
# Create a simple classifier
classifier = svm.LinearSVC(random_state=random_state)
classifier.fit(X_train, y_train)
y_score = classifier.decision_function(X_test)
###############################################################################
# Compute the average precision score
# ...................................
from sklearn.metrics import average_precision_score
average_precision = average_precision_score(y_test, y_score)
print('Average precision-recall score: {0:0.2f}'.format(
average_precision))
###############################################################################
# Plot the Precision-Recall curve
# ................................
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
precision, recall, _ = precision_recall_curve(y_test, y_score)
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2,
color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AUC={0:0.2f}'.format(
average_precision))
###############################################################################
# In multi-label settings
# ------------------------
#
# Create multi-label data, fit, and predict
# ...........................................
#
# We create a multi-label dataset, to illustrate the precision-recall in
# multi-label settings
from sklearn.preprocessing import label_binarize
# Use label_binarize to be multi-label like settings
Y = label_binarize(y, classes=[0, 1, 2])
n_classes = Y.shape[1]
# Split into training and test
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5,
random_state=random_state)
# We use OneVsRestClassifier for multi-label prediction
from sklearn.multiclass import OneVsRestClassifier
# Run classifier
classifier = OneVsRestClassifier(svm.LinearSVC(random_state=random_state))
classifier.fit(X_train, Y_train)
y_score = classifier.decision_function(X_test)
###############################################################################
# The average precision score in multi-label settings
# ....................................................
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# For each class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(Y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(Y_test[:, i], y_score[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = precision_recall_curve(Y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(Y_test, y_score,
average="micro")
print('Average precision score, micro-averaged over all classes: {0:0.2f}'
.format(average_precision["micro"]))
###############################################################################
# Plot the micro-averaged Precision-Recall curve
# ...............................................
#
plt.figure()
plt.step(recall['micro'], precision['micro'], color='b', alpha=0.2,
where='post')
plt.fill_between(recall["micro"], precision["micro"], step='post', alpha=0.2,
color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title(
'Average precision score, micro-averaged over all classes: AUC={0:0.2f}'
.format(average_precision["micro"]))
###############################################################################
# Plot Precision-Recall curve for each class and iso-f1 curves
# .............................................................
#
from itertools import cycle
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(7, 8))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(lines, labels, loc=(0, -.38), prop=dict(size=14))
plt.show()
| gpl-3.0 |
benoitsteiner/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 111 | 7865 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
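    # The pandas input_fn builds queue-based feeding ops, so queue runners have
    # to be started before the result tensors are evaluated and joined afterwards.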
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
umuzungu/zipline | zipline/data/loader.py | 1 | 17284 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import OrderedDict
import logbook
import pandas as pd
from pandas.io.data import DataReader
import pytz
from six import iteritems
from six.moves.urllib_error import HTTPError
from .benchmarks import get_benchmark_returns
from . import treasuries, treasuries_can
from ..utils.paths import (
cache_root,
data_root,
)
from ..utils.deprecate import deprecated
from ..utils.tradingcalendar import (
trading_day as trading_day_nyse,
trading_days as trading_days_nyse,
)
logger = logbook.Logger('Loader')
# Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
'^GSPC':
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
'^GSPTSE':
(treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'),
'^FTSE': # use US treasuries until UK bonds implemented
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
}
ONE_HOUR = pd.Timedelta(hours=1)
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
def get_data_filepath(name):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root()
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name)
def get_cache_filepath(name):
cr = cache_root()
if not os.path.exists(cr):
os.makedirs(cr)
return os.path.join(cr, name)
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date)
def load_market_data(trading_day=trading_day_nyse,
trading_days=trading_days_nyse,
bm_symbol='^GSPC'):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from Yahoo Finance. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to '^GSPC', the Yahoo
ticker for the S&P 500.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# We expect to have benchmark and treasury data that's current up until
# **two** full trading days prior to the most recently completed trading
# day.
# Example:
# On Thu Oct 22 2015, the previous completed trading day is Wed Oct 21.
# However, data for Oct 21 doesn't become available until the early morning
# hours of Oct 22. This means that there are times on the 22nd at which we
# cannot reasonably expect to have data for the 21st available. To be
# conservative, we instead expect that at any time on the 22nd, we can
# download data for Tuesday the 20th, which is two full trading days prior
# to the date on which we're running a test.
# We'll attempt to download new data if the latest entry in our cache is
# before this date.
last_date = trading_days[trading_days.get_loc(now, method='ffill') - 2]
br = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
now,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
)
tc = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
now,
)
benchmark_returns = br[br.index.slice_indexer(first_date, last_date)]
treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)]
return benchmark_returns, treasury_curves
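# Illustrative usage sketch (comment only, not additional API): the first call
# needs network access and caches its results under the zipline data root;
# later calls read the cache.
#   benchmark_returns, treasury_curves = load_market_data(bm_symbol='^GSPC')
#   print(benchmark_returns.tail())
#   print(treasury_curves['10year'].tail())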
def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
path = get_data_filepath(get_benchmark_filename(symbol))
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = pd.Series.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new benchmark data because a "
"download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
logger.info(
"Cache at {path} does not have data from {start} to {end}.\n"
"Downloading benchmark data for '{symbol}'.",
start=first_date,
end=last_date,
symbol=symbol,
path=path,
)
try:
data = get_benchmark_returns(
symbol,
first_date - trading_day,
last_date,
)
data.to_csv(path)
except (OSError, IOError, HTTPError):
logger.exception('failed to cache the new benchmark returns')
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def ensure_treasury_data(bm_symbol, first_date, last_date, now):
"""
Ensure we have treasury data from treasury module associated with
`bm_symbol`.
Parameters
----------
bm_symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
bm_symbol, INDEX_MAPPING['^GSPC']
)
first_date = max(first_date, loader_module.earliest_possible_date())
path = get_data_filepath(filename)
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = pd.DataFrame.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new treasury data because a "
"download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
try:
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(path)
except (OSError, IOError, HTTPError):
logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def _load_raw_yahoo_data(indexes=None, stocks=None, start=None, end=None):
"""Load closing prices from yahoo finance.
:Optional:
indexes : dict (Default: {'SPX': '^GSPC'})
Financial indexes to load.
stocks : list (Default: ['AAPL', 'GE', 'IBM', 'MSFT',
'XOM', 'AA', 'JNJ', 'PEP', 'KO'])
Stock closing prices to load.
    start : datetime (Default: datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc))
        Retrieve prices from start date on.
    end : datetime (Default: None)
        Retrieve prices until end date.
:Note:
This is based on code presented in a talk by Wes McKinney:
http://wesmckinney.com/files/20111017/notebook_output.pdf
"""
assert indexes is not None or stocks is not None, """
must specify stocks or indexes"""
if start is None:
start = pd.datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
if start is not None and end is not None:
assert start < end, "start date is later than end date."
data = OrderedDict()
if stocks is not None:
for stock in stocks:
logger.info('Loading stock: {}'.format(stock))
stock_pathsafe = stock.replace(os.path.sep, '--')
cache_filename = "{stock}-{start}-{end}.csv".format(
stock=stock_pathsafe,
start=start,
end=end).replace(':', '-')
cache_filepath = get_cache_filepath(cache_filename)
if os.path.exists(cache_filepath):
stkd = pd.DataFrame.from_csv(cache_filepath)
else:
stkd = DataReader(stock, 'yahoo', start, end).sort_index()
stkd.to_csv(cache_filepath)
data[stock] = stkd
if indexes is not None:
for name, ticker in iteritems(indexes):
logger.info('Loading index: {} ({})'.format(name, ticker))
stkd = DataReader(ticker, 'yahoo', start, end).sort_index()
data[name] = stkd
return data
def load_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads price data from Yahoo into a dataframe for each of the indicated
assets. By default, 'price' is taken from Yahoo's 'Adjusted Close',
which removes the impact of splits and dividends. If the argument
'adjusted' is False, then the non-adjusted 'close' field is used instead.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust the price for splits and dividends.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
if adjusted:
close_key = 'Adj Close'
else:
close_key = 'Close'
df = pd.DataFrame({key: d[close_key] for key, d in iteritems(data)})
df.index = df.index.tz_localize(pytz.utc)
return df
@deprecated(
'load_bars_from_yahoo is deprecated, please register a'
' yahoo_equities data bundle instead',
)
def load_bars_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads data from Yahoo into a panel with the following
column names for each indicated security:
- open
- high
- low
- close
- volume
- price
Note that 'price' is Yahoo's 'Adjusted Close', which removes the
impact of splits and dividends. If the argument 'adjusted' is True, then
the open, high, low, and close values are adjusted as well.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust open/high/low/close for splits and dividends.
The 'price' field is always adjusted.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
panel = pd.Panel(data)
# Rename columns
panel.minor_axis = ['open', 'high', 'low', 'close', 'volume', 'price']
panel.major_axis = panel.major_axis.tz_localize(pytz.utc)
# Adjust data
if adjusted:
adj_cols = ['open', 'high', 'low', 'close']
for ticker in panel.items:
ratio = (panel[ticker]['price'] / panel[ticker]['close'])
ratio_filtered = ratio.fillna(0).values
for col in adj_cols:
panel[ticker][col] *= ratio_filtered
return panel
def load_prices_from_csv(filepath, identifier_col, tz='UTC'):
data = pd.read_csv(filepath, index_col=identifier_col)
data.index = pd.DatetimeIndex(data.index, tz=tz)
data.sort_index(inplace=True)
return data
def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'):
data = None
for file in os.listdir(folderpath):
if '.csv' not in file:
continue
raw = load_prices_from_csv(os.path.join(folderpath, file),
identifier_col, tz)
if data is None:
data = raw
else:
data = pd.concat([data, raw], axis=1)
return data
| apache-2.0 |
akrherz/iem | htdocs/plotting/auto/scripts/p95.py | 1 | 7455 | """Average T and precip"""
import datetime
import psycopg2.extras
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.colors as mpcolors
from pyiem import network
from pyiem.plot import get_cmap, figure
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = {"none": "Show all values", "hide": 'Show "strong" events'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc[
"description"
] = """This chart displays the combination of average
temperature and total precipitation for one or more months of your choice.
The dots are colorized based on the Southern Oscillation Index (SOI) value
    for a month of your choice. Users often want to compare the SOI
    value with monthly totals for a period a few months after the month for
    which the SOI value is valid, the idea being that the impacts of a given
    SOI take some lag time to be felt in the midwestern US.
"""
desc["arguments"] = [
dict(
type="station",
name="station",
default="IA0000",
label="Select Station",
network="IACLIMATE",
),
dict(type="month", name="month", default=9, label="Start Month:"),
dict(
type="int",
name="months",
default=2,
label="Number of Months to Average:",
),
dict(
type="int",
name="lag",
default=-3,
label="Number of Months to Lag for SOI Value:",
),
dict(
type="select",
name="h",
default="none",
options=PDICT,
label="Hide/Show week SOI events -0.5 to 0.5",
),
dict(
type="text",
default=str(datetime.date.today().year),
name="year",
label="Year(s) to Highlight in Chart (comma delimited)",
),
dict(type="cmap", name="cmap", default="RdYlGn", label="Color Ramp:"),
]
return desc
def title(wanted):
""" Make a title """
t1 = datetime.date(2000, wanted[0], 1)
t2 = datetime.date(2000, wanted[-1], 1)
return "Avg Precip + Temp for %s%s" % (
t1.strftime("%B"),
" thru %s" % (t2.strftime("%B"),) if wanted[0] != wanted[-1] else "",
)
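# Illustrative examples of the helper above (the strings follow directly from
# the implementation; they are not additional fixtures):
#   title([9, 10]) -> "Avg Precip + Temp for September thru October"
#   title([7]) -> "Avg Precip + Temp for July"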
def plotter(fdict):
""" Go """
pgconn = get_dbconn("coop")
ccursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx["station"]
lagmonths = ctx["lag"]
months = ctx["months"]
month = ctx["month"]
highyears = [int(x) for x in ctx["year"].split(",")]
h = ctx["h"]
wantmonth = month + lagmonths
yearoffset = 0
if month + lagmonths < 1:
        wantmonth = 12 + (month + lagmonths)  # wrap into the previous year
yearoffset = 1
wanted = []
deltas = []
for m in range(month, month + months):
if m < 13:
wanted.append(m)
deltas.append(0)
else:
wanted.append(m - 12)
deltas.append(-1)
table = "alldata_%s" % (station[:2],)
nt = network.Table("%sCLIMATE" % (station[:2],))
elnino = {}
ccursor.execute("SELECT monthdate, soi_3m, anom_34 from elnino")
for row in ccursor:
if row[0].month != wantmonth:
continue
elnino[row[0].year + yearoffset] = dict(soi_3m=row[1], anom_34=row[2])
ccursor.execute(
"SELECT year, month, sum(precip), avg((high+low)/2.) "
f"from {table} where station = %s GROUP by year, month",
(station,),
)
if ccursor.rowcount == 0:
raise NoDataFound("No Data Found.")
yearly = {}
for row in ccursor:
(_year, _month, _precip, _temp) = row
if _month not in wanted:
continue
effectiveyear = _year + deltas[wanted.index(_month)]
nino = elnino.get(effectiveyear, {}).get("soi_3m", None)
if nino is None:
continue
data = yearly.setdefault(
effectiveyear, dict(precip=0, temp=[], nino=nino)
)
data["precip"] += _precip
data["temp"].append(float(_temp))
title2 = "[%s] %s %s" % (
station,
nt.sts[station]["name"],
title(wanted),
)
subtitle = "%s SOI (3 month average)" % (
datetime.date(2000, wantmonth, 1).strftime("%B"),
)
fig = figure(title=title2, subtitle=subtitle)
ax = fig.add_axes([0.07, 0.12, 0.53, 0.75])
cmap = get_cmap(ctx["cmap"])
zdata = np.arange(-2.0, 2.1, 0.5)
norm = mpcolors.BoundaryNorm(zdata, cmap.N)
rows = []
xs = []
ys = []
for year in yearly:
x = yearly[year]["precip"]
y = np.average(yearly[year]["temp"])
xs.append(x)
ys.append(y)
val = yearly[year]["nino"]
c = cmap(norm([val])[0])
if h == "hide" and val > -0.5 and val < 0.5:
ax.scatter(
x,
y,
facecolor="#EEEEEE",
edgecolor="#EEEEEE",
s=30,
zorder=2,
marker="s",
)
else:
ax.scatter(
x, y, facecolor=c, edgecolor="k", s=60, zorder=3, marker="o"
)
if year in highyears:
ax.text(
x, y + 0.2, "%s" % (year,), ha="center", va="bottom", zorder=5
)
rows.append(dict(year=year, precip=x, tmpf=y, soi3m=val))
ax.axhline(np.average(ys), lw=2, color="k", linestyle="-.", zorder=2)
ax.axvline(np.average(xs), lw=2, color="k", linestyle="-.", zorder=2)
sm = plt.cm.ScalarMappable(norm, cmap)
sm.set_array(zdata)
cb = fig.colorbar(sm, extend="both")
cb.set_label("<-- El Nino :: SOI :: La Nina -->")
ax.grid(True)
ax.set_xlim(left=-0.01)
ax.set_xlabel("Total Precipitation [inch], Avg: %.2f" % (np.average(xs),))
ax.set_ylabel(
(r"Average Temperature $^\circ$F, " "Avg: %.1f") % (np.average(ys),)
)
df = pd.DataFrame(rows)
ax2 = fig.add_axes([0.67, 0.55, 0.28, 0.35])
ax2.scatter(df["soi3m"].values, df["tmpf"].values)
ax2.set_xlabel("<-- El Nino :: SOI :: La Nina -->")
ax2.set_ylabel(r"Avg Temp $^\circ$F")
slp, intercept, r_value, _, _ = stats.linregress(
df["soi3m"].values, df["tmpf"].values
)
y1 = -2.0 * slp + intercept
y2 = 2.0 * slp + intercept
ax2.plot([-2, 2], [y1, y2])
ax2.text(
0.97,
0.9,
"R$^2$=%.2f" % (r_value ** 2,),
ha="right",
transform=ax2.transAxes,
bbox=dict(color="white"),
)
ax2.grid(True)
ax3 = fig.add_axes([0.67, 0.1, 0.28, 0.35])
ax3.scatter(df["soi3m"].values, df["precip"].values)
ax3.set_xlabel("<-- El Nino :: SOI :: La Nina -->")
ax3.set_ylabel("Total Precip [inch]")
slp, intercept, r_value, _, _ = stats.linregress(
df["soi3m"].values, df["precip"].values
)
y1 = -2.0 * slp + intercept
y2 = 2.0 * slp + intercept
ax3.plot([-2, 2], [y1, y2])
ax3.text(
0.97,
0.9,
"R$^2$=%.2f" % (r_value ** 2,),
ha="right",
transform=ax3.transAxes,
bbox=dict(color="white"),
)
ax3.grid(True)
return fig, df
if __name__ == "__main__":
plotter(dict())
| mit |
meteorcloudy/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 23 | 77821 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = linear.LinearClassifier(
n_classes=3,
feature_columns=[language_column],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [language_column, fc_core.numeric_column('age')]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
    classifier = linear.LinearClassifier(
        n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
    def _optimizer():
      return ftrl.FtrlOptimizer(learning_rate=0.1)
    classifier = linear.LinearClassifier(
        n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
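  # Added illustration (a sketch, not part of the original suite): the
  # strided_slice in _my_metric_op above keeps the second column of the
  # [batch, 2] probability tensor, i.e. P(class == 1). The hypothetical
  # helper below shows the equivalent operation in plain numpy.
  @staticmethod
  def _positive_class_probabilities(probabilities):
    probabilities = np.asarray(probabilities)
    return probabilities[:, 1:2]  # same [batch, 1] shape as the sliced tensor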
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
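  # Back-of-the-envelope sketch (assumption: the default partitioner only
  # splits variables above a ~64 MB min_slice_size). A hash bucket size of
  # 2e7 with float32 weights is roughly 2e7 * 4 bytes = 80 MB, which is why
  # the 'language' column above ends up partitioned across the two fake
  # parameter servers. `_approx_weight_bytes` is a hypothetical helper added
  # for illustration only.
  @staticmethod
  def _approx_weight_bytes(hash_bucket_size=int(2e7), bytes_per_weight=4):
    return hash_bucket_size * bytes_per_weight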
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
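  # Added sketch (hypothetical helper, not used by the tests): the weighted
  # label mean referenced in the comments above can be computed directly,
  # e.g. _weighted_label_mean([1, 0, 0, 0], [100., 3., 2., 2.]) is 100 / 107,
  # roughly 0.93, which is why the train-set label mean is asserted > 0.9.
  @staticmethod
  def _weighted_label_mean(labels, weights):
    labels = np.asarray(labels, dtype=np.float64)
    weights = np.asarray(weights, dtype=np.float64)
    return float(np.sum(labels * weights) / np.sum(weights))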
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.4, 0.6, 0.3]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClassifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeaturesOOVWithNoOOVBuckets(self):
"""LinearClassifier with SDCAOptimizer with OOV features (-1 IDs)."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
# 'GB' is out of the vocabulary.
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_keys(
'country', keys=['US', 'CA', 'MK', 'IT', 'CN'])
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerPartitionedVariables(self):
"""Tests LinearClassifier with SDCAOptimizer with partitioned variables."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id',
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer,
config=config)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
print('all scores = {}'.format(scores))
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerPartitionedVariables(self):
"""Tests LinearRegressor with SDCAOptimizer with partitioned variables."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0,
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer,
config=config)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
          # place_holder is an empty column which is always 0 (absent), because
          # LinearRegressor requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
          [[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
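  # Added illustration (hypothetical helper, not referenced by the test): the
  # 0.25 target for the bias weight is simply the fraction of positive labels
  # in the synthetic input, since the bias is the only feature present.
  @staticmethod
  def _positive_label_fraction(num_examples=40):
    labels = [1 if i % 4 == 0 else 0 for i in range(num_examples)]
    return sum(labels) / float(num_examples)  # 0.25 when num_examples is 40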
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
      Until b/29339026 is resolved, the bias gets regularized with the same
      global value as the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
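  # Added sketch (hypothetical helper): the 0.4 / 0.2 positive rates quoted in
  # the docstring above come directly from the repeated ten-element label
  # patterns; with a centered bias the expected solution would then be
  # bias=0.3, a=0.1, b=-0.1 as the docstring states.
  @staticmethod
  def _positive_rates_for_a_and_b():
    a_labels = [1, 0, 0, 1, 1, 0, 0, 0, 1, 0]  # pattern repeated for 'a' rows
    b_labels = [0, 1, 0, 0, 0, 0, 0, 0, 1, 0]  # pattern repeated for 'b' rows
    return sum(a_labels) / 10.0, sum(b_labels) / 10.0  # (0.4, 0.2)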
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearEstimator(feature_columns=cont_features,
head=head_lib.regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(feature_columns=[age, language],
head=head_lib.regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(
feature_columns=[age, language],
head=head_lib.poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear.LinearEstimator(
head=head_lib.regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/backends/backend_gtkcairo.py | 8 | 2374 | """
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
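# Note: this backend would typically be selected with matplotlib.use('GTKCairo')
# before importing pyplot (assumption based on the usual backend naming scheme).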
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print('backend_gtkcairo.%s()' % fn_name())
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTKCairo(figure)
return FigureManagerGTK(canvas, num)
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.gc.ctx = pixmap.cairo_create()
else:
def set_pixmap (self, pixmap):
self.gc.ctx = cairo.gtk.gdk_cairo_create (pixmap)
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2GTKCairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
FigureCanvas = FigureCanvasGTKCairo
FigureManager = FigureManagerGTKCairo
| mit |
NervanaSystems/coach | rl_coach/tests/trace_tests.py | 1 | 12370 | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import glob
import os
import shutil
import subprocess
import multiprocessing
import sys
import signal
import pandas as pd
import time
from configparser import ConfigParser
from importlib import import_module
from os import path
sys.path.append('.')
from rl_coach.logger import screen
processes = []
def sigint_handler(signum, frame):
for proc in processes:
os.killpg(os.getpgid(proc[2].pid), signal.SIGTERM)
for f in os.listdir('experiments/'):
if '__test_trace' in f:
shutil.rmtree(os.path.join('experiments', f))
for f in os.listdir('.'):
if 'trace_test_log' in f:
os.remove(f)
exit()
signal.signal(signal.SIGINT, sigint_handler)
def read_csv_paths(test_path, filename_pattern, read_csv_tries=100):
csv_paths = []
tries_counter = 0
while not csv_paths:
csv_paths = glob.glob(path.join(test_path, '*', filename_pattern))
if tries_counter > read_csv_tries:
break
tries_counter += 1
time.sleep(1)
return csv_paths
def clean_df(df):
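    # Wall-clock time differs from run to run, so it has to be dropped before a
    # new run can be compared against a stored trace.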
if 'Wall-Clock Time' in df.keys():
df.drop(['Wall-Clock Time'], 1, inplace=True)
return df
def run_trace_based_test(preset_name, num_env_steps, level=None):
test_name = '__test_trace_{}{}'.format(preset_name, '_' + level if level else '').replace(':', '_')
test_path = os.path.join('./experiments', test_name)
if path.exists(test_path):
shutil.rmtree(test_path)
# run the experiment in a separate thread
screen.log_title("Running test {}{}".format(preset_name, ' - ' + level if level else ''))
log_file_name = 'trace_test_log_{preset_name}.txt'.format(preset_name=test_name[13:])
config_file = './tmp.cred'
cmd = (
'python3 rl_coach/coach.py '
'-p {preset_name} '
'-e {test_name} '
'--seed 42 '
'-c '
'-dcp {template} '
'--no_summary '
'-cp {custom_param} '
'{level} '
'&> {log_file_name} '
).format(
preset_name=preset_name,
test_name=test_name,
template=config_file,
log_file_name=log_file_name,
level='-lvl ' + level if level else '',
custom_param='\"improve_steps=EnvironmentSteps({n});'
'steps_between_evaluation_periods=EnvironmentSteps({n});'
'evaluation_steps=EnvironmentSteps(1);'
'heatup_steps=EnvironmentSteps(1024)\"'.format(n=num_env_steps)
)
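    # Illustrative example of the resulting command, assuming a hypothetical
    # preset named 'CartPole_DQN' and num_env_steps=1000:
    #   python3 rl_coach/coach.py -p CartPole_DQN -e __test_trace_CartPole_DQN \
    #     --seed 42 -c -dcp ./tmp.cred --no_summary \
    #     -cp "improve_steps=EnvironmentSteps(1000);..." &> trace_test_log_CartPole_DQN.txt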
p = subprocess.Popen(cmd, shell=True, executable="/bin/bash", preexec_fn=os.setsid)
return test_path, log_file_name, p
def wait_and_check(args, processes, force=False):
if not force and len(processes) < args.max_threads:
return None
test_path = processes[0][0]
test_name = test_path.split('/')[-1]
log_file_name = processes[0][1]
p = processes[0][2]
p.wait()
filename_pattern = '*.csv'
# get the csv with the results
csv_paths = read_csv_paths(test_path, filename_pattern)
test_passed = False
screen.log('Results for {}: '.format(test_name[13:]))
if not csv_paths:
screen.error("csv file never found", crash=False)
if args.verbose:
screen.error("command exitcode: {}".format(p.returncode), crash=False)
screen.error(open(log_file_name).read(), crash=False)
else:
trace_path = os.path.join('./rl_coach', 'traces', test_name[13:])
if not os.path.exists(trace_path):
screen.log('No trace found, creating new trace in: {}'.format(trace_path))
os.makedirs(trace_path)
df = pd.read_csv(csv_paths[0])
df = clean_df(df)
try:
df.to_csv(os.path.join(trace_path, 'trace.csv'), index=False)
except:
pass
screen.success("Successfully created new trace.")
test_passed = True
else:
test_df = pd.read_csv(csv_paths[0])
test_df = clean_df(test_df)
new_trace_csv_path = os.path.join(trace_path, 'trace_new.csv')
test_df.to_csv(new_trace_csv_path, index=False)
test_df = pd.read_csv(new_trace_csv_path)
trace_csv_path = glob.glob(path.join(trace_path, 'trace.csv'))
trace_csv_path = trace_csv_path[0]
trace_df = pd.read_csv(trace_csv_path)
test_passed = test_df.equals(trace_df)
if test_passed:
screen.success("Passed successfully.")
os.remove(new_trace_csv_path)
test_passed = True
else:
screen.error("Trace test failed.", crash=False)
if args.overwrite:
os.remove(trace_csv_path)
os.rename(new_trace_csv_path, trace_csv_path)
screen.error("Overwriting old trace.", crash=False)
else:
screen.error("bcompare {} {}".format(trace_csv_path, new_trace_csv_path), crash=False)
shutil.rmtree(test_path)
os.remove(log_file_name)
processes.pop(0)
return test_passed
def generate_config(image, memory_backend, s3_end_point, s3_bucket_name, s3_creds_file, config_file):
"""
    Generate the s3 config file and the dist-coach-config.template to be used for the test.
    It reads the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` env vars and fails if they are not provided.
"""
# Write s3 creds
aws_config = ConfigParser({
'aws_access_key_id': os.environ.get('AWS_ACCESS_KEY_ID'),
'aws_secret_access_key': os.environ.get('AWS_SECRET_ACCESS_KEY')
}, default_section='default')
with open(s3_creds_file, 'w') as f:
aws_config.write(f)
coach_config = ConfigParser({
'image': image,
'memory_backend': memory_backend,
'data_store': 's3',
's3_end_point': s3_end_point,
's3_bucket_name': s3_bucket_name,
's3_creds_file': s3_creds_file
}, default_section="coach")
with open(config_file, 'w') as f:
coach_config.write(f)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--preset', '--presets',
help="(string) Name of preset(s) to run (comma separated, as configured in presets.py)",
default=None,
type=str)
parser.add_argument('-ip', '--ignore_presets',
help="(string) Name of preset(s) to ignore (comma separated, and as configured in presets.py)",
default=None,
type=str)
parser.add_argument('-v', '--verbose',
help="(flag) display verbose logs in the event of an error",
action='store_true')
parser.add_argument('--stop_after_first_failure',
help="(flag) stop executing tests after the first error",
action='store_true')
parser.add_argument('-ow', '--overwrite',
help="(flag) overwrite old trace with new ones in trace testing mode",
action='store_true')
parser.add_argument('-prl', '--parallel',
help="(flag) run tests in parallel",
action='store_true')
parser.add_argument('-ut', '--update_traces',
help="(flag) update traces on repository",
action='store_true')
parser.add_argument('-mt', '--max_threads',
help="(int) maximum number of threads to run in parallel",
default=multiprocessing.cpu_count()-2,
type=int)
parser.add_argument(
'-i', '--image', help="(string) Name of the testing image", type=str, default=None
)
parser.add_argument(
'-mb', '--memory_backend', help="(string) Name of the memory backend", type=str, default="redispubsub"
)
parser.add_argument(
'-e', '--endpoint', help="(string) Name of the s3 endpoint", type=str, default='s3.amazonaws.com'
)
parser.add_argument(
'-cr', '--creds_file', help="(string) Path of the s3 creds file", type=str, default='.aws_creds'
)
parser.add_argument(
'-b', '--bucket', help="(string) Name of the bucket for s3", type=str, default=None
)
args = parser.parse_args()
if args.update_traces:
if not args.bucket:
print("bucket_name required for s3")
exit(1)
if not os.environ.get('AWS_ACCESS_KEY_ID') or not os.environ.get('AWS_SECRET_ACCESS_KEY'):
print("AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env vars need to be set")
exit(1)
config_file = './tmp.cred'
generate_config(args.image, args.memory_backend, args.endpoint, args.bucket, args.creds_file, config_file)
if not args.parallel:
args.max_threads = 1
if args.preset is not None:
presets_lists = args.preset.split(',')
else:
presets_lists = [f[:-3] for f in os.listdir(os.path.join('rl_coach', 'presets')) if
f[-3:] == '.py' and not f == '__init__.py']
fail_count = 0
test_count = 0
if args.ignore_presets is not None:
presets_to_ignore = args.ignore_presets.split(',')
else:
presets_to_ignore = []
for idx, preset_name in enumerate(sorted(presets_lists)):
if args.stop_after_first_failure and fail_count > 0:
break
if preset_name not in presets_to_ignore:
try:
preset = import_module('rl_coach.presets.{}'.format(preset_name))
except:
screen.error("Failed to load preset <{}>".format(preset_name), crash=False)
fail_count += 1
test_count += 1
continue
preset_validation_params = preset.graph_manager.preset_validation_params
num_env_steps = preset_validation_params.trace_max_env_steps
if preset_validation_params.test_using_a_trace_test:
if preset_validation_params.trace_test_levels:
for level in preset_validation_params.trace_test_levels:
test_count += 1
test_path, log_file, p = run_trace_based_test(preset_name, num_env_steps, level)
processes.append((test_path, log_file, p))
test_passed = wait_and_check(args, processes)
if test_passed is not None and not test_passed:
fail_count += 1
else:
test_count += 1
test_path, log_file, p = run_trace_based_test(preset_name, num_env_steps)
processes.append((test_path, log_file, p))
test_passed = wait_and_check(args, processes)
if test_passed is not None and not test_passed:
fail_count += 1
while len(processes) > 0:
test_passed = wait_and_check(args, processes, force=True)
if test_passed is not None and not test_passed:
fail_count += 1
screen.separator()
if fail_count == 0:
screen.success(" Summary: " + str(test_count) + "/" + str(test_count) + " tests passed successfully")
else:
screen.error(" Summary: " + str(test_count - fail_count) + "/" + str(test_count) + " tests passed successfully", crash=False)
    # check fail counts only if update_traces is not activated
if not args.update_traces:
assert fail_count == 0
if __name__ == '__main__':
os.environ['DISABLE_MUJOCO_RENDERING'] = '1'
main()
del os.environ['DISABLE_MUJOCO_RENDERING']
| apache-2.0 |
nealchenzhang/EODAnalyzer | untitled1.py | 1 | 2952 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 13 14:22:01 2017
@author: Aian Fund
"""
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
from matplotlib.finance import candlestick_ohlc
from matplotlib import style
import numpy as np
import urllib
import datetime as dt
style.use('fivethirtyeight')
print(plt.style.available)
print(plt.__file__)
def bytespdate2num(fmt, encoding='utf-8'):
strconverter = mdates.strpdate2num(fmt)
def bytesconverter(b):
s = b.decode(encoding)
return strconverter(s)
return bytesconverter
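# bytespdate2num returns a converter suitable for np.loadtxt's `converters`
# argument: it decodes each byte string and parses it with the given date
# format (used below for column 0 of the downloaded CSV).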
def graph_data(stock):
fig = plt.figure()
ax1 = plt.subplot2grid((1,1), (0,0))
stock_price_url = 'http://chartapi.finance.yahoo.com/instrument/1.0/'+stock+'/chartdata;type=quote;range=1m/csv'
source_code = urllib.request.urlopen(stock_price_url).read().decode()
stock_data = []
split_source = source_code.split('\n')
for line in split_source:
split_line = line.split(',')
if len(split_line) == 6:
if 'values' not in line and 'labels' not in line:
stock_data.append(line)
date, closep, highp, lowp, openp, volume = np.loadtxt(stock_data,
delimiter=',',
unpack=True,
converters={0: bytespdate2num('%Y%m%d')})
x = 0
y = len(date)
ohlc = []
while x < y:
append_me = date[x], openp[x], highp[x], lowp[x], closep[x], volume[x]
ohlc.append(append_me)
x+=1
candlestick_ohlc(ax1, ohlc, width=0.4, colorup='#77d879', colordown='#db3f3f')
for label in ax1.xaxis.get_ticklabels():
label.set_rotation(45)
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax1.xaxis.set_major_locator(mticker.MaxNLocator(10))
ax1.grid(True)
bbox_props = dict(boxstyle='round',fc='w', ec='k',lw=1)
ax1.annotate(str(closep[-1]), (date[-1], closep[-1]),
xytext = (date[-1]+3, closep[-1]), bbox=bbox_props)
## # Annotation example with arrow
## ax1.annotate('Bad News!',(date[11],highp[11]),
## xytext=(0.8, 0.9), textcoords='axes fraction',
## arrowprops = dict(facecolor='grey',color='grey'))
##
##
## # Font dict example
## font_dict = {'family':'serif',
## 'color':'darkred',
## 'size':15}
## # Hard coded text
## ax1.text(date[10], closep[1],'Text Example', fontdict=font_dict)
plt.xlabel('Date')
plt.ylabel('Price')
plt.title(stock)
#plt.legend()
plt.subplots_adjust(left=0.11, bottom=0.24, right=0.87, top=0.90, wspace=0.2, hspace=0)
plt.show()
graph_data('EBAY') | mit |
nomadcube/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by pointing and clicking, and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
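        # Evaluate the classifier's decision function on a regular grid
        # covering the plot area; Z is later drawn with contour/contourf.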
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
costypetrisor/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate **overfitting** / **underfitting** quantitatively by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set: the higher it is, the less likely the model is to generalize correctly
from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
    # Evaluate the models using cross-validation
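    # The "mean_squared_error" scorer follows the greater-is-better convention
    # and therefore returns negated MSE values, hence -scores.mean() in the
    # title below.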
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
ky822/scikit-learn | sklearn/utils/tests/test_extmath.py | 70 | 16531 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structured approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
        # Unsupported or mismatched dtypes.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
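    # For two chunks with counts n1, n2, means m1, m2 and (population)
    # variances v1, v2, the incremental update is equivalent to the standard
    # pooled-moment identities:
    #   mean = (n1 * m1 + n2 * m2) / (n1 + n2)
    #   var  = (n1 * v1 + n2 * v2) / (n1 + n2)
    #          + n1 * n2 * ((m1 - m2) / (n1 + n2)) ** 2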
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
ElDeveloper/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 127 | 7477 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
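For example, with ``n_samples = 1e6`` and ``eps = 0.1`` this bound evaluates to
roughly 11,842, i.e. about twelve thousand dimensions regardless of the
original feature count.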
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` allows a drastic reduction in the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
simon-pepin/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 47 | 8095 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
ppegusii/cs689-final | src/representation.py | 1 | 3863 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import gzip
import numpy as np
import pandas as pd
import sys
import load
def main():
args = parseArgs(sys.argv)
raw = load.data(args.input)
rep = None
if args.representation == 'last':
rep = last(raw)
elif args.representation == 'change':
rep = change(raw)
else:
print('Invalid representation requested: {}'.format(
args.representation))
sys.exit(1)
write(rep, args.output)
def last(df):
# XOR with self shifted by 1 row (change)
# Find the indices that have value 1
# Forward fill the rows between the indices
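    # Illustration (derived from the first sensor column of testLast below):
    #   raw:    [0, 1, 1, 1, 1, 0, 0]
    #   change: [0, 1, 0, 0, 0, 1, 0]
    #   last:   [0, 1, 1, 0, 0, 1, 0]  (a 1 persists until another sensor changes)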
feat = df.iloc[:, :-1].values
# print('feat.shape = {}'.format(feat.shape))
# create the shifted sequence by copying the first row and deleting the last
# print('feat[0:1, :].shape = {}'.format(feat[0:1, :].shape))
# print('feat[:-1, :].shape = {}'.format(feat[:-1, :].shape))
shifted = np.concatenate([feat[0:1, :], feat[:-1, :]])
change = np.logical_xor(feat, shifted).astype(int)
idxs = np.where(change)
idxs, inv = np.unique(idxs[0], return_inverse=True)
# print('idxs = {}'.format(idxs))
if len(idxs) != len(inv):
print('{} simultaneous sensor firings occurred!'.format(
len(inv) - len(idxs)))
for i in xrange(len(idxs)):
# print('*****')
# print(change)
if i != len(idxs) - 1:
change[idxs[i]+1:idxs[i+1], :] = change[idxs[i]:idxs[i]+1, :]
else:
change[idxs[i]+1:, :] = change[idxs[i]:idxs[i]+1, :]
# print(change)
newDf = df.copy()
newDf.iloc[:, :-1] = change
return newDf
def change(df):
# XOR with self shifted by 1 row (change)
feat = df.iloc[:, :-1].values
# create the shifted sequence by copying the first row and deleting the last
shifted = np.concatenate([feat[0:1, :], feat[:-1, :]])
change = np.logical_xor(feat, shifted).astype(int)
newDf = df.copy()
newDf.iloc[:, :-1] = change
return newDf
def testChange():
# From paper
data = pd.DataFrame(
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 0],
[0, 1, 0], [0, 0, 0]]))
result = np.array([[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0],
[1, 0, 0], [0, 1, 0]])
print('Input')
print(data.values)
output = change(data).values
print('Output')
print(output)
print('True')
print(result)
assert np.array_equal(output, result)
def testLast():
# From paper
data = pd.DataFrame(
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 0],
[0, 1, 0], [0, 0, 0]]))
result = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 1, 0],
[1, 0, 0], [0, 1, 0]])
print('Input')
print(data.values)
output = last(data).values
print('Output')
print(output)
print('True')
print(result)
assert np.array_equal(output, result)
def write(df, fileName):
with gzip.open(fileName, 'w') as f:
df.to_csv(f, float_format='%.0f')
def parseArgs(args):
parser = argparse.ArgumentParser(
        description=('Convert a Kasteren sensor sequence into a feature '
                     'representation (last or change). '
                     'Written in Python 2.7.'),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',
help='CSV of sequence in Raw format.')
parser.add_argument('output',
help='Path of CSV output.')
parser.add_argument('-r', '--representation',
default='last',
help=('Feature representation of output. '
'Choices {last, change}.'))
return parser.parse_args()
if __name__ == '__main__':
main()
| gpl-2.0 |
fredhusser/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
tell us much about `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
# Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
Silmathoron/nest-simulator | pynest/examples/intrinsic_currents_spiking.py | 12 | 6788 | # -*- coding: utf-8 -*-
#
# intrinsic_currents_spiking.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
"""Intrinsic currents spiking
-------------------------------
This example illustrates a neuron receiving spiking input through
several different receptors (AMPA, NMDA, GABA_A, GABA_B), provoking
spike output. The model, ``ht_neuron``, also has intrinsic currents
(``I_NaP``, ``I_KNa``, ``I_T``, and ``I_h``). It is a slightly simplified implementation of
the neuron model proposed in [1]_.
The neuron is bombarded with spike trains from four Poisson generators,
which are connected to the AMPA, NMDA, GABA_A, and GABA_B receptors,
respectively.
References
~~~~~~~~~~~
.. [1] Hill and Tononi (2005) Modeling sleep and wakefulness in the
thalamocortical system. J Neurophysiol 93:1671
http://dx.doi.org/10.1152/jn.00915.2004.
See Also
~~~~~~~~~~
:doc:`intrinsic_currents_subthreshold`
"""
###############################################################################
# We import all necessary modules for simulation, analysis and plotting.
import nest
import matplotlib.pyplot as plt
###############################################################################
# Additionally, we set the verbosity using ``set_verbosity`` to suppress info
# messages. We also reset the kernel to be sure to start with a clean NEST.
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# We define the simulation parameters:
#
# - The rate of the input spike trains
# - The weights of the different receptors (names must match receptor types)
# - The time to simulate
#
# Note that all parameter values should be doubles, since NEST expects doubles.
rate_in = 100.
w_recep = {'AMPA': 30., 'NMDA': 30., 'GABA_A': 5., 'GABA_B': 10.}
t_sim = 250.
num_recep = len(w_recep)
###############################################################################
# We create
#
# - one neuron instance
# - one Poisson generator instance for each synapse type
# - one multimeter to record from the neuron:
# - membrane potential
# - threshold potential
# - synaptic conductances
# - intrinsic currents
#
# See :doc:`intrinsic_currents_subthreshold` for more details on ``multimeter``
# configuration.
nrn = nest.Create('ht_neuron')
p_gens = nest.Create('poisson_generator', 4, params={'rate': rate_in})
mm = nest.Create('multimeter',
params={'interval': 0.1,
'record_from': ['V_m', 'theta',
'g_AMPA', 'g_NMDA',
'g_GABA_A', 'g_GABA_B',
'I_NaP', 'I_KNa', 'I_T', 'I_h']})
###############################################################################
# We now connect each Poisson generator with the neuron through a different
# receptor type.
#
# First, we need to obtain the numerical codes for the receptor types from
# the model. The ``receptor_types`` entry of the default dictionary for the
# ``ht_neuron`` model is a dictionary mapping receptor names to codes.
#
# In the loop, we use Python's tuple unpacking mechanism to unpack
# dictionary entries from our `w_recep` dictionary.
#
# Note that we index ``p_gens`` directly with ``p_gens[index]`` when
# passing it to ``Connect``, so that each receptor receives input from
# its own generator.
receptors = nest.GetDefaults('ht_neuron')['receptor_types']
for index, (rec_name, rec_wgt) in enumerate(w_recep.items()):
nest.Connect(p_gens[index], nrn, syn_spec={'receptor_type': receptors[rec_name], 'weight': rec_wgt})
###############################################################################
# We then connect the ``multimeter``. Note that the multimeter is connected to
# the neuron, not the other way around.
nest.Connect(mm, nrn)
###############################################################################
# We are now ready to simulate.
nest.Simulate(t_sim)
###############################################################################
# We now fetch the data recorded by the multimeter. The data are returned as
# a dictionary with entry ``times`` containing timestamps for all
# recorded data, plus one entry per recorded quantity.
# All data is contained in the ``events`` entry of the multimeter's status
# dictionary, which we access here directly through its ``events`` property.
data = mm.events
t = data['times']
###############################################################################
# The following function turns a name such as ``I_NaP`` into proper TeX code
# :math:`I_{\mathrm{NaP}}` for a pretty label.
def texify_name(name):
return r'${}_{{\mathrm{{{}}}}}$'.format(*name.split('_'))
###############################################################################
# The next step is to plot the results. We create a new figure, and add one
# subplot each for membrane and threshold potential, synaptic conductances,
# and intrinsic currents.
fig = plt.figure()
Vax = fig.add_subplot(311)
Vax.plot(t, data['V_m'], 'b', lw=2, label=r'$V_m$')
Vax.plot(t, data['theta'], 'g', lw=2, label=r'$\Theta$')
Vax.set_ylabel('Potential [mV]')
try:
Vax.legend(fontsize='small')
except TypeError:
Vax.legend() # work-around for older Matplotlib versions
Vax.set_title('ht_neuron driven by Poisson processes')
Gax = fig.add_subplot(312)
for gname in ('g_AMPA', 'g_NMDA', 'g_GABA_A', 'g_GABA_B'):
Gax.plot(t, data[gname], lw=2, label=texify_name(gname))
try:
Gax.legend(fontsize='small')
except TypeError:
Gax.legend() # work-around for older Matplotlib versions
Gax.set_ylabel('Conductance [nS]')
Iax = fig.add_subplot(313)
for iname, color in (('I_h', 'maroon'), ('I_T', 'orange'),
('I_NaP', 'crimson'), ('I_KNa', 'aqua')):
Iax.plot(t, data[iname], color=color, lw=2, label=texify_name(iname))
try:
Iax.legend(fontsize='small')
except TypeError:
Iax.legend() # work-around for older Matplotlib versions
Iax.set_ylabel('Current [pA]')
Iax.set_xlabel('Time [ms]')
| gpl-2.0 |
tjlaboss/openmc | openmc/mgxs/mgxs.py | 3 | 303367 | from collections import OrderedDict
import copy
from numbers import Integral
import os
import warnings
import h5py
import numpy as np
import openmc
from openmc.data import REACTION_MT, REACTION_NAME, FISSION_MTS
import openmc.checkvalue as cv
from ..tallies import ESTIMATOR_TYPES
from . import EnergyGroups
# Supported cross section types
MGXS_TYPES = (
'total',
'transport',
'nu-transport',
'absorption',
'capture',
'fission',
'nu-fission',
'kappa-fission',
'scatter',
'nu-scatter',
'scatter matrix',
'nu-scatter matrix',
'multiplicity matrix',
'nu-fission matrix',
'scatter probability matrix',
'consistent scatter matrix',
'consistent nu-scatter matrix',
'chi',
'chi-prompt',
'inverse-velocity',
'prompt-nu-fission',
'prompt-nu-fission matrix',
'current',
'diffusion-coefficient',
'nu-diffusion-coefficient'
)
# Some scores from REACTION_MT are not supported, or are simply overkill to
# support and test (like inelastic levels), so remove those from consideration
_BAD_SCORES = ["(n,misc)", "(n,absorption)", "(n,total)", "fission"]
_BAD_SCORES += [REACTION_NAME[mt] for mt in FISSION_MTS]
ARBITRARY_VECTOR_TYPES = tuple(k for k in REACTION_MT.keys()
if k not in _BAD_SCORES)
ARBITRARY_MATRIX_TYPES = []
for rxn in ARBITRARY_VECTOR_TYPES:
# Preclude the fission channels from being treated as a matrix
if rxn not in [REACTION_NAME[mt] for mt in FISSION_MTS]:
split_rxn = rxn.strip("()").split(",")
if len(split_rxn) > 1 and "n" in split_rxn[1]:
# Then there is a neutron product, so it can also be a matrix
ARBITRARY_MATRIX_TYPES.append(rxn + " matrix")
ARBITRARY_MATRIX_TYPES = tuple(ARBITRARY_MATRIX_TYPES)
# Supported domain types
DOMAIN_TYPES = (
'cell',
'distribcell',
'universe',
'material',
'mesh'
)
# Filter types corresponding to each domain
_DOMAIN_TO_FILTER = {
'cell': openmc.CellFilter,
'distribcell': openmc.DistribcellFilter,
'universe': openmc.UniverseFilter,
'material': openmc.MaterialFilter,
'mesh': openmc.MeshFilter
}
# Supported domain classes
_DOMAINS = (
openmc.Cell,
openmc.Universe,
openmc.Material,
openmc.RegularMesh
)
# Supported ScatterMatrixXS angular distribution types. Note that 'histogram' is
# defined here and used in mgxs_library.py, but it is not used for the current
# module
SCATTER_TABULAR = 'tabular'
SCATTER_LEGENDRE = 'legendre'
SCATTER_HISTOGRAM = 'histogram'
MU_TREATMENTS = (
SCATTER_LEGENDRE,
SCATTER_HISTOGRAM
)
# Maximum Legendre order supported by OpenMC
_MAX_LEGENDRE = 10
def _df_column_convert_to_bin(df, current_name, new_name, values_to_bin,
reverse_order=False):
"""Convert a Pandas DataFrame column from the bin edges to an index for
each bin. This method operates on the DataFrame, df, in-place.
Parameters
----------
df : pandas.DataFrame
A Pandas DataFrame containing the cross section data.
current_name : str
Name of the column to replace with bins
new_name : str
New name for column after the data is replaced with bins
values_to_bin : Iterable of Real
Values of the bin edges to be used for identifying the bins
reverse_order : bool
Whether the bin indices should be reversed
"""
# Get the current values
df_bins = np.asarray(df[current_name])
new_vals = np.zeros_like(df_bins, dtype=int)
# Replace the values with the index of the closest entry in values_to_bin
# The closest is used because it is expected that the values in df could
# have lost precision along the way
for i, df_val in enumerate(df_bins):
idx = np.searchsorted(values_to_bin, df_val)
# Check to make sure if the value is just above the search result
if idx > 0 and np.isclose(values_to_bin[idx - 1], df_val):
idx -= 1
# If it is just below the search result then we are done
new_vals[i] = idx
# Switch to a one-based indexing
new_vals += 1
# Reverse the ordering if requested (this is for energy group ordering)
if reverse_order:
new_vals = (len(values_to_bin) - 1) - new_vals + 1
# Assign the values
df[current_name] = new_vals[:]
# And rename the column
df.rename(columns={current_name: new_name}, inplace=True)
class MGXS:
"""An abstract multi-group cross section for some energy group structure
within some spatial domain.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations.
.. note:: Users should instantiate the subclasses of this abstract class.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file) and the number of mesh cells for
'mesh' domain types.
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
# Store whether or not the number density should be removed for microscopic
# values of this data
_divide_by_density = True
def __init__(self, domain=None, domain_type=None,
energy_groups=None, by_nuclide=False, name='', num_polar=1,
num_azimuthal=1):
self._name = ''
self._rxn_type = None
self._by_nuclide = None
self._nuclides = None
self._estimator = 'tracklength'
self._domain = None
self._domain_type = None
self._energy_groups = None
self._num_polar = 1
self._num_azimuthal = 1
self._tally_trigger = None
self._tallies = None
self._rxn_rate_tally = None
self._xs_tally = None
self._sparse = False
self._loaded_sp = False
self._derived = False
self._hdf5_key = None
self._valid_estimators = ESTIMATOR_TYPES
self.name = name
self.by_nuclide = by_nuclide
if domain_type is not None:
self.domain_type = domain_type
if domain is not None:
self.domain = domain
if energy_groups is not None:
self.energy_groups = energy_groups
self.num_polar = num_polar
self.num_azimuthal = num_azimuthal
def __deepcopy__(self, memo):
existing = memo.get(id(self))
# If this object has been copied before, return the first copy made
if existing is not None:
return existing
# If this is the first time we have tried to copy this object, copy it
clone = type(self).__new__(type(self))
clone._name = self.name
clone._rxn_type = self.rxn_type
clone._by_nuclide = self.by_nuclide
clone._nuclides = copy.deepcopy(self._nuclides, memo)
clone._domain = self.domain
clone._domain_type = self.domain_type
clone._energy_groups = copy.deepcopy(self.energy_groups, memo)
clone._num_polar = self._num_polar
clone._num_azimuthal = self._num_azimuthal
clone._tally_trigger = copy.deepcopy(self.tally_trigger, memo)
clone._rxn_rate_tally = copy.deepcopy(self._rxn_rate_tally, memo)
clone._xs_tally = copy.deepcopy(self._xs_tally, memo)
clone._sparse = self.sparse
clone._loaded_sp = self._loaded_sp
clone._derived = self.derived
clone._hdf5_key = self._hdf5_key
clone._tallies = OrderedDict()
for tally_type, tally in self.tallies.items():
clone.tallies[tally_type] = copy.deepcopy(tally, memo)
memo[id(self)] = clone
return clone
def _add_angle_filters(self, filters):
"""Add the azimuthal and polar bins to the MGXS filters if needed.
Filters will be provided as a ragged 2D list of openmc.Filter objects.
Parameters
----------
filters : Iterable of Iterable of openmc.Filter
Ragged 2D list of openmc.Filter objects for the energy and spatial
domains. The angle filters will be added to the list.
Returns
-------
Iterable of Iterable of openmc.Filter
Ragged 2D list of openmc.Filter objects for the energy and spatial
domains with the angle filters added to the list.
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
# Then the user has requested angular data, so create the bins
pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,
endpoint=True)
azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,
endpoint=True)
for filt in filters:
filt.insert(0, openmc.PolarFilter(pol_bins))
filt.insert(1, openmc.AzimuthalFilter(azi_bins))
return filters
def _squeeze_xs(self, xs):
"""Remove dimensions which are not needed from a cross section array
due to user options. This is used by the openmc.Mgxs.get_xs(...) method
Parameters
----------
xs : np.ndarray
Cross sections array with dimensions to be squeezed
Returns
-------
np.ndarray
Squeezed array of cross sections
"""
        # numpy.squeeze raises a ValueError if an axis has a size greater
        # than 1, so we check each axis one at a time and only squeeze
        # those with length one.
initial_shape = len(xs.shape)
for axis in range(initial_shape - 1, -1, -1):
if axis not in self._dont_squeeze and xs.shape[axis] == 1:
xs = np.squeeze(xs, axis=axis)
return xs
def _df_convert_columns_to_bins(self, df):
"""This method converts all relevant and present DataFrame columns from
their bin boundaries to the index for each bin. This method operates on
the DataFrame, df, in place. The method returns a list of the columns
in which it has operated on.
Parameters
----------
df : pandas.DataFrame
A Pandas DataFrame containing the cross section data.
Returns
-------
columns : Iterable of str
Names of the re-named and re-valued columns
"""
# Override polar and azimuthal bounds with indices
if self.num_polar > 1 or self.num_azimuthal > 1:
# First for polar
bins = np.linspace(0., np.pi, self.num_polar + 1, True)
_df_column_convert_to_bin(df, 'polar low', 'polar bin', bins)
del df['polar high']
# Second for azimuthal
bins = np.linspace(-np.pi, np.pi, self.num_azimuthal + 1, True)
_df_column_convert_to_bin(df, 'azimuthal low', 'azimuthal bin',
bins)
del df['azimuthal high']
columns = ['polar bin', 'azimuthal bin']
else:
columns = []
# Override energy groups bounds with indices
if 'energy low [eV]' in df:
_df_column_convert_to_bin(df, 'energy low [eV]', 'group in',
self.energy_groups.group_edges,
reverse_order=True)
del df['energy high [eV]']
columns += ['group in']
if 'energyout low [eV]' in df:
_df_column_convert_to_bin(df, 'energyout low [eV]', 'group out',
self.energy_groups.group_edges,
reverse_order=True)
del df['energyout high [eV]']
columns += ['group out']
if 'mu low' in df and hasattr(self, 'histogram_bins'):
# Only the ScatterMatrix class has the histogram_bins attribute
bins = np.linspace(-1., 1., self.histogram_bins + 1, True)
_df_column_convert_to_bin(df, 'mu low', 'mu bin', bins)
del df['mu high']
columns += ['mu bin']
return columns
@property
def _dont_squeeze(self):
"""Create a tuple of axes which should not be removed during the get_xs
process
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
return (0, 1, 3)
else:
return (1, )
@property
def name(self):
return self._name
@property
def rxn_type(self):
return self._rxn_type
@property
def by_nuclide(self):
return self._by_nuclide
@property
def domain(self):
return self._domain
@property
def domain_type(self):
return self._domain_type
@property
def energy_groups(self):
return self._energy_groups
@property
def num_polar(self):
return self._num_polar
@property
def num_azimuthal(self):
return self._num_azimuthal
@property
def tally_trigger(self):
return self._tally_trigger
@property
def num_groups(self):
return self.energy_groups.num_groups
@property
def scores(self):
return ['flux', self.rxn_type]
@property
def filters(self):
group_edges = self.energy_groups.group_edges
energy_filter = openmc.EnergyFilter(group_edges)
filters = []
for i in range(len(self.scores)):
filters.append([energy_filter])
return self._add_angle_filters(filters)
@property
def tally_keys(self):
return self.scores
@property
def estimator(self):
return self._estimator
@property
def tallies(self):
# Instantiate tallies if they do not exist
if self._tallies is None:
# Initialize a collection of Tallies
self._tallies = OrderedDict()
# Create a domain Filter object
filter_type = _DOMAIN_TO_FILTER[self.domain_type]
if self.domain_type == 'mesh':
domain_filter = filter_type(self.domain)
else:
domain_filter = filter_type(self.domain.id)
if isinstance(self.estimator, str):
estimators = [self.estimator] * len(self.scores)
else:
estimators = self.estimator
# Create each Tally needed to compute the multi group cross section
tally_metadata = \
zip(self.scores, self.tally_keys, self.filters, estimators)
for score, key, filters, estimator in tally_metadata:
self._tallies[key] = openmc.Tally(name=self.name)
self._tallies[key].scores = [score]
self._tallies[key].estimator = estimator
if score != 'current':
self._tallies[key].filters = [domain_filter]
# If a tally trigger was specified, add it to each tally
if self.tally_trigger:
trigger_clone = copy.deepcopy(self.tally_trigger)
trigger_clone.scores = [score]
self._tallies[key].triggers.append(trigger_clone)
# Add non-domain specific Filters (e.g., 'energy') to the Tally
for add_filter in filters:
self._tallies[key].filters.append(add_filter)
# If this is a by-nuclide cross-section, add nuclides to Tally
if self.by_nuclide and score != 'flux':
self._tallies[key].nuclides += self.get_nuclides()
else:
self._tallies[key].nuclides.append('total')
return self._tallies
@property
def rxn_rate_tally(self):
if self._rxn_rate_tally is None:
self._rxn_rate_tally = self.tallies[self.rxn_type]
self._rxn_rate_tally.sparse = self.sparse
return self._rxn_rate_tally
@property
def xs_tally(self):
if self._xs_tally is None:
if self.tallies is None:
msg = 'Unable to get xs_tally since tallies have ' \
'not been loaded from a statepoint'
raise ValueError(msg)
self._xs_tally = self.rxn_rate_tally / self.tallies['flux']
self._compute_xs()
return self._xs_tally
@property
def sparse(self):
return self._sparse
@property
def num_subdomains(self):
if self.domain_type.startswith('sum('):
domain_type = self.domain_type[4:-1]
else:
domain_type = self.domain_type
if self._rxn_type == 'current':
filter_type = openmc.MeshSurfaceFilter
else:
filter_type = _DOMAIN_TO_FILTER[domain_type]
domain_filter = self.xs_tally.find_filter(filter_type)
return domain_filter.num_bins
@property
def num_nuclides(self):
if self.by_nuclide:
return len(self.get_nuclides())
else:
return 1
@property
def nuclides(self):
if self.by_nuclide:
return self.get_nuclides()
else:
return ['sum']
@property
def loaded_sp(self):
return self._loaded_sp
@property
def derived(self):
return self._derived
@property
def hdf5_key(self):
if self._hdf5_key is not None:
return self._hdf5_key
else:
return self._rxn_type
@name.setter
def name(self, name):
cv.check_type('name', name, str)
self._name = name
@by_nuclide.setter
def by_nuclide(self, by_nuclide):
cv.check_type('by_nuclide', by_nuclide, bool)
self._by_nuclide = by_nuclide
@nuclides.setter
def nuclides(self, nuclides):
cv.check_iterable_type('nuclides', nuclides, str)
self._nuclides = nuclides
@estimator.setter
def estimator(self, estimator):
cv.check_value('estimator', estimator, self._valid_estimators)
self._estimator = estimator
@domain.setter
def domain(self, domain):
cv.check_type('domain', domain, _DOMAINS)
self._domain = domain
# Assign a domain type
if self.domain_type is None:
if isinstance(domain, openmc.Material):
self._domain_type = 'material'
elif isinstance(domain, openmc.Cell):
self._domain_type = 'cell'
elif isinstance(domain, openmc.Universe):
self._domain_type = 'universe'
elif isinstance(domain, openmc.RegularMesh):
self._domain_type = 'mesh'
@domain_type.setter
def domain_type(self, domain_type):
cv.check_value('domain type', domain_type, DOMAIN_TYPES)
self._domain_type = domain_type
@energy_groups.setter
def energy_groups(self, energy_groups):
cv.check_type('energy groups', energy_groups, openmc.mgxs.EnergyGroups)
self._energy_groups = energy_groups
@num_polar.setter
def num_polar(self, num_polar):
cv.check_type('num_polar', num_polar, Integral)
cv.check_greater_than('num_polar', num_polar, 0)
self._num_polar = num_polar
@num_azimuthal.setter
def num_azimuthal(self, num_azimuthal):
cv.check_type('num_azimuthal', num_azimuthal, Integral)
cv.check_greater_than('num_azimuthal', num_azimuthal, 0)
self._num_azimuthal = num_azimuthal
@tally_trigger.setter
def tally_trigger(self, tally_trigger):
cv.check_type('tally trigger', tally_trigger, openmc.Trigger)
self._tally_trigger = tally_trigger
@sparse.setter
def sparse(self, sparse):
"""Convert tally data from NumPy arrays to SciPy list of lists (LIL)
sparse matrices, and vice versa.
This property may be used to reduce the amount of data in memory during
tally data processing. The tally data will be stored as SciPy LIL
matrices internally within the Tally object. All tally data access
properties and methods will return data as a dense NumPy array.
"""
cv.check_type('sparse', sparse, bool)
# Sparsify or densify the derived MGXS tallies and the base tallies
if self._xs_tally:
self.xs_tally.sparse = sparse
if self._rxn_rate_tally:
self.rxn_rate_tally.sparse = sparse
for tally_name in self.tallies:
self.tallies[tally_name].sparse = sparse
self._sparse = sparse
@staticmethod
def get_mgxs(mgxs_type, domain=None, domain_type=None,
energy_groups=None, by_nuclide=False, name='', num_polar=1,
num_azimuthal=1):
"""Return a MGXS subclass object for some energy group structure within
some spatial domain for some reaction type.
This is a factory method which can be used to quickly create MGXS
subclass objects for various reaction types.
Parameters
----------
mgxs_type : str or Integral
The type of multi-group cross section object to return; valid
values are members of MGXS_TYPES, or the reaction types that are
the keys of REACTION_MT. Note that if a reaction type from
REACTION_MT is used, it can be appended with ' matrix' to obtain
a multigroup matrix (from incoming to outgoing energy groups) for
reactions with a neutron in an outgoing channel.
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain.
Defaults to False
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file. Defaults to the empty string.
num_polar : Integral, optional
Number of equi-width polar angles for angle discretization;
defaults to no discretization
num_azimuthal : Integral, optional
Number of equi-width azimuthal angles for angle discretization;
defaults to no discretization
Returns
-------
openmc.mgxs.MGXS
A subclass of the abstract MGXS class for the multi-group cross
section type requested by the user
"""
cv.check_value(
"mgxs_type", mgxs_type,
MGXS_TYPES + ARBITRARY_VECTOR_TYPES + ARBITRARY_MATRIX_TYPES)
if mgxs_type == 'total':
mgxs = TotalXS(domain, domain_type, energy_groups)
elif mgxs_type == 'transport':
mgxs = TransportXS(domain, domain_type, energy_groups)
elif mgxs_type == 'nu-transport':
mgxs = TransportXS(domain, domain_type, energy_groups, nu=True)
elif mgxs_type == 'absorption':
mgxs = AbsorptionXS(domain, domain_type, energy_groups)
elif mgxs_type == 'capture':
mgxs = CaptureXS(domain, domain_type, energy_groups)
elif mgxs_type == 'fission':
mgxs = FissionXS(domain, domain_type, energy_groups)
elif mgxs_type == 'nu-fission':
mgxs = FissionXS(domain, domain_type, energy_groups, nu=True)
elif mgxs_type == 'kappa-fission':
mgxs = KappaFissionXS(domain, domain_type, energy_groups)
elif mgxs_type == 'scatter':
mgxs = ScatterXS(domain, domain_type, energy_groups)
elif mgxs_type == 'nu-scatter':
mgxs = ScatterXS(domain, domain_type, energy_groups, nu=True)
elif mgxs_type == 'scatter matrix':
mgxs = ScatterMatrixXS(domain, domain_type, energy_groups)
elif mgxs_type == 'nu-scatter matrix':
mgxs = ScatterMatrixXS(domain, domain_type, energy_groups, nu=True)
elif mgxs_type == 'multiplicity matrix':
mgxs = MultiplicityMatrixXS(domain, domain_type, energy_groups)
elif mgxs_type == 'scatter probability matrix':
mgxs = ScatterProbabilityMatrix(domain, domain_type, energy_groups)
elif mgxs_type == 'consistent scatter matrix':
mgxs = ScatterMatrixXS(domain, domain_type, energy_groups)
mgxs.formulation = 'consistent'
elif mgxs_type == 'consistent nu-scatter matrix':
mgxs = ScatterMatrixXS(domain, domain_type, energy_groups, nu=True)
mgxs.formulation = 'consistent'
elif mgxs_type == 'nu-fission matrix':
mgxs = NuFissionMatrixXS(domain, domain_type, energy_groups)
elif mgxs_type == 'chi':
mgxs = Chi(domain, domain_type, energy_groups)
elif mgxs_type == 'chi-prompt':
mgxs = Chi(domain, domain_type, energy_groups, prompt=True)
elif mgxs_type == 'inverse-velocity':
mgxs = InverseVelocity(domain, domain_type, energy_groups)
elif mgxs_type == 'prompt-nu-fission':
mgxs = FissionXS(domain, domain_type, energy_groups, prompt=True)
elif mgxs_type == 'prompt-nu-fission matrix':
mgxs = NuFissionMatrixXS(domain, domain_type, energy_groups,
prompt=True)
elif mgxs_type == 'current':
mgxs = Current(domain, domain_type, energy_groups)
elif mgxs_type == 'diffusion-coefficient':
mgxs = DiffusionCoefficient(domain, domain_type, energy_groups)
elif mgxs_type == 'nu-diffusion-coefficient':
mgxs = DiffusionCoefficient(domain, domain_type, energy_groups, nu=True)
elif mgxs_type in ARBITRARY_VECTOR_TYPES:
# Then it is a reaction not covered by the above that is
# supported by the ArbitraryXS Class
mgxs = ArbitraryXS(mgxs_type, domain, domain_type, energy_groups)
elif mgxs_type in ARBITRARY_MATRIX_TYPES:
mgxs = ArbitraryMatrixXS(mgxs_type, domain, domain_type,
energy_groups)
mgxs.by_nuclide = by_nuclide
mgxs.name = name
mgxs.num_polar = num_polar
mgxs.num_azimuthal = num_azimuthal
return mgxs
def get_nuclides(self):
"""Get all nuclides in the cross section's spatial domain.
Returns
-------
list of str
A list of the string names for each nuclide in the spatial domain
(e.g., ['U235', 'U238', 'O16'])
Raises
------
ValueError
When this method is called before the spatial domain has been set.
"""
if self.domain is None:
raise ValueError('Unable to get all nuclides without a domain')
# If the user defined nuclides, return them
if self._nuclides:
return self._nuclides
# Otherwise, return all nuclides in the spatial domain
else:
return self.domain.get_nuclides()
def get_nuclide_density(self, nuclide):
"""Get the atomic number density in units of atoms/b-cm for a nuclide
in the cross section's spatial domain.
Parameters
----------
nuclide : str
A nuclide name string (e.g., 'U235')
Returns
-------
float
The atomic number density (atom/b-cm) for the nuclide of interest
"""
cv.check_type('nuclide', nuclide, str)
# Get list of all nuclides in the spatial domain
nuclides = self.domain.get_nuclide_densities()
return nuclides[nuclide][1] if nuclide in nuclides else 0.0
def get_nuclide_densities(self, nuclides='all'):
"""Get an array of atomic number densities in units of atom/b-cm for all
nuclides in the cross section's spatial domain.
Parameters
----------
nuclides : Iterable of str or 'all' or 'sum'
A list of nuclide name strings (e.g., ['U235', 'U238']). The
special string 'all' will return the atom densities for all nuclides
in the spatial domain. The special string 'sum' will return the atom
density summed across all nuclides in the spatial domain. Defaults
to 'all'.
Returns
-------
numpy.ndarray of float
An array of the atomic number densities (atom/b-cm) for each of the
nuclides in the spatial domain
Raises
------
ValueError
When this method is called before the spatial domain has been set.
"""
if self.domain is None:
raise ValueError('Unable to get nuclide densities without a domain')
# Sum the atomic number densities for all nuclides
if nuclides == 'sum':
nuclides = self.get_nuclides()
densities = np.zeros(1, dtype=np.float)
for nuclide in nuclides:
densities[0] += self.get_nuclide_density(nuclide)
# Tabulate the atomic number densities for all nuclides
elif nuclides == 'all':
nuclides = self.get_nuclides()
densities = np.zeros(self.num_nuclides, dtype=np.float)
for i, nuclide in enumerate(nuclides):
densities[i] += self.get_nuclide_density(nuclide)
# Tabulate the atomic number densities for each specified nuclide
else:
densities = np.zeros(len(nuclides), dtype=np.float)
for i, nuclide in enumerate(nuclides):
densities[i] = self.get_nuclide_density(nuclide)
return densities
def _compute_xs(self):
"""Performs generic cleanup after a subclass' uses tally arithmetic to
compute a multi-group cross section as a derived tally.
This method replaces CrossNuclides generated by tally arithmetic with
the original Nuclide objects in the xs_tally instance attribute. The
simple Nuclides allow for cleaner output through Pandas DataFrames as
well as simpler data access through the get_xs(...) class method.
        In addition, this routine resets NaNs in the multi-group cross section
        array to 0.0. NaNs may occur if no events were scored in certain tally
        bins, which leads to a divide-by-zero situation.
"""
# If computing xs for each nuclide, replace CrossNuclides with originals
if self.by_nuclide:
self.xs_tally._nuclides = []
nuclides = self.get_nuclides()
for nuclide in nuclides:
self.xs_tally.nuclides.append(openmc.Nuclide(nuclide))
# Remove NaNs which may have resulted from divide-by-zero operations
self.xs_tally._mean = np.nan_to_num(self.xs_tally.mean)
self.xs_tally._std_dev = np.nan_to_num(self.xs_tally.std_dev)
self.xs_tally.sparse = self.sparse
def load_from_statepoint(self, statepoint):
"""Extracts tallies in an OpenMC StatePoint with the data needed to
compute multi-group cross sections.
This method is needed to compute cross section data from tallies
in an OpenMC StatePoint object.
.. note:: The statepoint must be linked with an OpenMC Summary object.
Parameters
----------
statepoint : openmc.StatePoint
An OpenMC StatePoint object with tally data
Raises
------
ValueError
When this method is called with a statepoint that has not been
linked with a summary object.
"""
cv.check_type('statepoint', statepoint, openmc.StatePoint)
if statepoint.summary is None:
msg = 'Unable to load data from a statepoint which has not been ' \
'linked with a summary file'
raise ValueError(msg)
        # Override the domain object with the one loaded from the summary file
# NOTE: This is necessary for micro cross-sections which require
# the isotopic number densities as computed by OpenMC
su = statepoint.summary
if self.domain_type in ('cell', 'distribcell'):
self.domain = su._fast_cells[self.domain.id]
elif self.domain_type == 'universe':
self.domain = su._fast_universes[self.domain.id]
elif self.domain_type == 'material':
self.domain = su._fast_materials[self.domain.id]
elif self.domain_type == 'mesh':
self.domain = statepoint.meshes[self.domain.id]
else:
msg = 'Unable to load data from a statepoint for domain type {0} ' \
'which is not yet supported'.format(self.domain_type)
raise ValueError(msg)
# Use tally "slicing" to ensure that tallies correspond to our domain
# NOTE: This is important if tally merging was used
if self.domain_type == 'mesh':
filters = [_DOMAIN_TO_FILTER[self.domain_type]]
filter_bins = [tuple(self.domain.indices)]
elif self.domain_type != 'distribcell':
filters = [_DOMAIN_TO_FILTER[self.domain_type]]
filter_bins = [(self.domain.id,)]
        # Distribcell filters only accept a single cell - neglect it when slicing
else:
filters = []
filter_bins = []
# Clear any tallies previously loaded from a statepoint
if self.loaded_sp:
self._tallies = None
self._xs_tally = None
self._rxn_rate_tally = None
self._loaded_sp = False
# Find, slice and store Tallies from StatePoint
# The tally slicing is needed if tally merging was used
for tally_type, tally in self.tallies.items():
sp_tally = statepoint.get_tally(
tally.scores, tally.filters, tally.nuclides,
estimator=tally.estimator, exact_filters=True)
sp_tally = sp_tally.get_slice(
tally.scores, filters, filter_bins, tally.nuclides)
sp_tally.sparse = self.sparse
self.tallies[tally_type] = sp_tally
self._loaded_sp = True
def get_xs(self, groups='all', subdomains='all', nuclides='all',
xs_type='macro', order_groups='increasing',
value='mean', squeeze=True, **kwargs):
r"""Returns an array of multi-group cross sections.
This method constructs a 3D NumPy array for the requested
multi-group cross section data for one or more subdomains
(1st dimension), energy groups (2nd dimension), and nuclides
(3rd dimension).
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
A list of nuclide name strings (e.g., ['U235', 'U238']). The
special string 'all' will return the cross sections for all nuclides
in the spatial domain. The special string 'sum' will return the
cross section summed over all nuclides. Defaults to 'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the multi-group cross section indexed in the order
each group, subdomain and nuclide is listed in the parameters.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# FIXME: Unable to get microscopic xs for mesh domain because the mesh
# cells do not know the nuclide densities in each mesh cell.
if self.domain_type == 'mesh' and xs_type == 'micro':
msg = 'Unable to get micro xs for mesh domain since the mesh ' \
'cells do not know the nuclide densities in each mesh cell.'
raise ValueError(msg)
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral,
max_depth=3)
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
subdomain_bins = []
for subdomain in subdomains:
subdomain_bins.append(subdomain)
filter_bins.append(tuple(subdomain_bins))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(groups, str):
cv.check_iterable_type('groups', groups, Integral)
filters.append(openmc.EnergyFilter)
energy_bins = []
for group in groups:
energy_bins.append(
(self.energy_groups.get_group_bounds(group),))
filter_bins.append(tuple(energy_bins))
# Construct a collection of the nuclides to retrieve from the xs tally
if self.by_nuclide:
if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:
query_nuclides = self.get_nuclides()
else:
query_nuclides = nuclides
else:
query_nuclides = ['total']
# If user requested the sum for all nuclides, use tally summation
if nuclides == 'sum' or nuclides == ['sum']:
xs_tally = self.xs_tally.summation(nuclides=query_nuclides)
xs = xs_tally.get_values(filters=filters,
filter_bins=filter_bins, value=value)
else:
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins,
nuclides=query_nuclides, value=value)
# Divide by atom number densities for microscopic cross sections
if xs_type == 'micro' and self._divide_by_density:
if self.by_nuclide:
densities = self.get_nuclide_densities(nuclides)
else:
densities = self.get_nuclide_densities('sum')
if value == 'mean' or value == 'std_dev':
xs /= densities[np.newaxis, :, np.newaxis]
# Eliminate the trivial score dimension
xs = np.squeeze(xs, axis=len(xs.shape) - 1)
xs = np.nan_to_num(xs)
if groups == 'all':
num_groups = self.num_groups
else:
num_groups = len(groups)
# Reshape tally data array with separate axes for domain and energy
        # Accommodate the polar and azimuthal bins if needed
num_subdomains = int(xs.shape[0] / (num_groups * self.num_polar *
self.num_azimuthal))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
num_groups)
else:
new_shape = (num_subdomains, num_groups)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
xs = xs[..., ::-1, :]
if squeeze:
# We want to squeeze out everything but the polar, azimuthal,
# and energy group data.
xs = self._squeeze_xs(xs)
return xs
def get_flux(self, groups='all', subdomains='all',
order_groups='increasing', value='mean',
squeeze=True, **kwargs):
r"""Returns an array of the fluxes used to weight the MGXS.
This method constructs a 2D NumPy array for the requested
weighting flux for one or more subdomains (1st dimension), and
energy groups (2nd dimension).
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the flux indexed in the order
each group and subdomain is listed in the parameters.
Raises
------
ValueError
When this method is called before the data is available from tally
data, or, when this is used on an MGXS type without a flux score.
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral,
max_depth=3)
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
subdomain_bins = []
for subdomain in subdomains:
subdomain_bins.append(subdomain)
filter_bins.append(tuple(subdomain_bins))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(groups, str):
cv.check_iterable_type('groups', groups, Integral)
filters.append(openmc.EnergyFilter)
energy_bins = []
for group in groups:
energy_bins.append(
(self.energy_groups.get_group_bounds(group),))
filter_bins.append(tuple(energy_bins))
# Determine which flux to obtain
# Step through in order of usefulness
for key in ['flux', 'flux (tracklength)', 'flux (analog)']:
if key in self.tally_keys:
tally = self.tallies[key]
break
else:
msg = "MGXS of Type {} do not have an explicit weighting flux!"
raise ValueError(msg.format(self.__name__))
flux = tally.get_values(filters=filters, filter_bins=filter_bins,
nuclides=['total'], value=value)
# Eliminate the trivial score dimension
flux = np.squeeze(flux, axis=len(flux.shape) - 1)
# Eliminate the trivial nuclide dimension
flux = np.squeeze(flux, axis=len(flux.shape) - 1)
flux = np.nan_to_num(flux)
if groups == 'all':
num_groups = self.num_groups
else:
num_groups = len(groups)
# Reshape tally data array with separate axes for domain and energy
        # Accommodate the polar and azimuthal bins if needed
num_subdomains = int(flux.shape[0] / (num_groups * self.num_polar *
self.num_azimuthal))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
num_groups)
else:
new_shape = (num_subdomains, num_groups)
new_shape += flux.shape[1:]
flux = np.reshape(flux, new_shape)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
flux = flux[..., ::-1]
if squeeze:
# We want to squeeze out everything but the polar, azimuthal,
# and energy group data.
flux = self._squeeze_xs(flux)
return flux
def get_condensed_xs(self, coarse_groups):
"""Construct an energy-condensed version of this cross section.
Parameters
----------
coarse_groups : openmc.mgxs.EnergyGroups
The coarse energy group structure of interest
Returns
-------
MGXS
A new MGXS condensed to the group structure of interest
"""
cv.check_type('coarse_groups', coarse_groups, EnergyGroups)
cv.check_less_than('coarse groups', coarse_groups.num_groups,
self.num_groups, equality=True)
cv.check_value('upper coarse energy', coarse_groups.group_edges[-1],
[self.energy_groups.group_edges[-1]])
cv.check_value('lower coarse energy', coarse_groups.group_edges[0],
[self.energy_groups.group_edges[0]])
# Clone this MGXS to initialize the condensed version
condensed_xs = copy.deepcopy(self)
condensed_xs._rxn_rate_tally = None
condensed_xs._xs_tally = None
condensed_xs._sparse = False
condensed_xs._energy_groups = coarse_groups
# Build energy indices to sum across
energy_indices = []
for group in range(coarse_groups.num_groups, 0, -1):
low, high = coarse_groups.get_group_bounds(group)
low_index = np.where(self.energy_groups.group_edges == low)[0][0]
energy_indices.append(low_index)
fine_edges = self.energy_groups.group_edges
# Condense each of the tallies to the coarse group structure
for tally in condensed_xs.tallies.values():
# Make condensed tally derived and null out sum, sum_sq
tally._derived = True
tally._sum = None
tally._sum_sq = None
# Get tally data arrays reshaped with one dimension per filter
mean = tally.get_reshaped_data(value='mean')
std_dev = tally.get_reshaped_data(value='std_dev')
# Sum across all applicable fine energy group filters
for i, tally_filter in enumerate(tally.filters):
if not isinstance(tally_filter, (openmc.EnergyFilter,
openmc.EnergyoutFilter)):
continue
elif len(tally_filter.bins) != len(fine_edges) - 1:
continue
elif not np.allclose(tally_filter.bins[:, 0], fine_edges[:-1]):
continue
else:
cedge = coarse_groups.group_edges
tally_filter.values = cedge
tally_filter.bins = np.vstack((cedge[:-1], cedge[1:])).T
mean = np.add.reduceat(mean, energy_indices, axis=i)
std_dev = np.add.reduceat(std_dev**2, energy_indices,
axis=i)
std_dev = np.sqrt(std_dev)
# Reshape condensed data arrays with one dimension for all filters
mean = np.reshape(mean, tally.shape)
std_dev = np.reshape(std_dev, tally.shape)
# Override tally's data with the new condensed data
tally._mean = mean
tally._std_dev = std_dev
# Compute the energy condensed multi-group cross section
condensed_xs.sparse = self.sparse
return condensed_xs
def get_subdomain_avg_xs(self, subdomains='all'):
"""Construct a subdomain-averaged version of this cross section.
This method is useful for averaging cross sections across distribcell
instances. The method performs spatial homogenization to compute the
scalar flux-weighted average cross section across the subdomains.
Parameters
----------
subdomains : Iterable of Integral or 'all'
The subdomain IDs to average across. Defaults to 'all'.
Returns
-------
openmc.mgxs.MGXS
A new MGXS averaged across the subdomains of interest
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
# Construct a collection of the subdomain filter bins to average across
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral)
subdomains = [(subdomain,) for subdomain in subdomains]
subdomains = [tuple(subdomains)]
elif self.domain_type == 'distribcell':
subdomains = [i for i in range(self.num_subdomains)]
subdomains = [tuple(subdomains)]
else:
subdomains = None
# Clone this MGXS to initialize the subdomain-averaged version
avg_xs = copy.deepcopy(self)
avg_xs._rxn_rate_tally = None
avg_xs._xs_tally = None
# Average each of the tallies across subdomains
for tally_type, tally in avg_xs.tallies.items():
filt_type = _DOMAIN_TO_FILTER[self.domain_type]
tally_avg = tally.summation(filter_type=filt_type,
filter_bins=subdomains)
avg_xs.tallies[tally_type] = tally_avg
avg_xs._domain_type = 'sum({0})'.format(self.domain_type)
avg_xs.sparse = self.sparse
return avg_xs
def _get_homogenized_mgxs(self, other_mgxs, denom_score='flux'):
"""Construct a homogenized MGXS with other MGXS objects.
This method constructs a new MGXS object that is the flux-weighted
combination of two MGXS objects. It is equivalent to what one would
obtain if the tally spatial domain were designed to encompass the
individual domains for both MGXS objects. This is accomplished by
summing the rxn rate (numerator) tally and the denominator tally
(often a tally of the flux over the spatial domain) that are used to
compute a multi-group cross-section.
Parameters
----------
other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS
The MGXS to homogenize with this one.
denom_score : str
            The score used in the denominator when computing the MGXS.
Returns
-------
openmc.mgxs.MGXS
A new homogenized MGXS
Raises
------
ValueError
If the other_mgxs is of a different type.
"""
# Check type of denom score
cv.check_type('denom_score', denom_score, str)
# Construct a collection of the subdomain filter bins to homogenize
# across
if isinstance(other_mgxs, openmc.mgxs.MGXS):
other_mgxs = [other_mgxs]
cv.check_iterable_type('other_mgxs', other_mgxs, openmc.mgxs.MGXS)
for mgxs in other_mgxs:
if mgxs.rxn_type != self.rxn_type:
msg = 'Not able to homogenize two MGXS with different rxn types'
raise ValueError(msg)
# Clone this MGXS to initialize the homogenized version
homogenized_mgxs = copy.deepcopy(self)
homogenized_mgxs._derived = True
name = 'hom({}, '.format(self.domain.name)
# Get the domain filter
filter_type = _DOMAIN_TO_FILTER[self.domain_type]
self_filter = self.rxn_rate_tally.find_filter(filter_type)
# Get the rxn rate and denom tallies
rxn_rate_tally = self.rxn_rate_tally
denom_tally = self.tallies[denom_score]
for mgxs in other_mgxs:
# Swap the domain filter bins for the other mgxs rxn rate tally
other_rxn_rate_tally = copy.deepcopy(mgxs.rxn_rate_tally)
other_filter = other_rxn_rate_tally.find_filter(filter_type)
other_filter._bins = self_filter._bins
# Swap the domain filter bins for the denom tally
other_denom_tally = copy.deepcopy(mgxs.tallies[denom_score])
other_filter = other_denom_tally.find_filter(filter_type)
other_filter._bins = self_filter._bins
# Add the rxn rate and denom tallies
rxn_rate_tally += other_rxn_rate_tally
denom_tally += other_denom_tally
            # Update the name for the homogenized MGXS
name += '{}, '.format(mgxs.domain.name)
# Set the properties of the homogenized MGXS
homogenized_mgxs._rxn_rate_tally = rxn_rate_tally
homogenized_mgxs.tallies[denom_score] = denom_tally
homogenized_mgxs._domain.name = name[:-2] + ')'
return homogenized_mgxs
def get_homogenized_mgxs(self, other_mgxs):
"""Construct a homogenized mgxs with other MGXS objects.
Parameters
----------
other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS
The MGXS to homogenize with this one.
Returns
-------
openmc.mgxs.MGXS
A new homogenized MGXS
Raises
------
ValueError
If the other_mgxs is of a different type.
"""
return self._get_homogenized_mgxs(other_mgxs, 'flux')
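    # Usage sketch (comment only): flux-weighted homogenization of two MGXS.
    # Assumes `xs_fuel` and `xs_clad` are same-type MGXS computed on different
    # cells and loaded from the same statepoint; the names are hypothetical.
    #
    #   xs_hom = xs_fuel.get_homogenized_mgxs(xs_clad)
    #   print(xs_hom.domain.name)   # e.g. 'hom(fuel, clad)'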
def get_slice(self, nuclides=[], groups=[]):
"""Build a sliced MGXS for the specified nuclides and energy groups.
This method constructs a new MGXS to encapsulate a subset of the data
represented by this MGXS. The subset of data to include in the tally
slice is determined by the nuclides and energy groups specified in
the input parameters.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
groups : list of int
A list of energy group indices starting at 1 for the high energies
(e.g., [1, 2, 3]; default is [])
Returns
-------
openmc.mgxs.MGXS
A new MGXS object which encapsulates the subset of data requested
for the nuclide(s) and/or energy group(s) requested in the
parameters.
"""
cv.check_iterable_type('nuclides', nuclides, str)
cv.check_iterable_type('energy_groups', groups, Integral)
# Build lists of filters and filter bins to slice
filters = []
filter_bins = []
if len(groups) != 0:
energy_bins = []
for group in groups:
group_bounds = self.energy_groups.get_group_bounds(group)
energy_bins.append(group_bounds)
filter_bins.append(tuple(energy_bins))
filters.append(openmc.EnergyFilter)
# Clone this MGXS to initialize the sliced version
slice_xs = copy.deepcopy(self)
slice_xs._rxn_rate_tally = None
slice_xs._xs_tally = None
# Slice each of the tallies across nuclides and energy groups
for tally_type, tally in slice_xs.tallies.items():
slice_nuclides = [nuc for nuc in nuclides if nuc in tally.nuclides]
if len(groups) != 0 and tally.contains_filter(openmc.EnergyFilter):
tally_slice = tally.get_slice(filters=filters,
filter_bins=filter_bins,
nuclides=slice_nuclides)
else:
tally_slice = tally.get_slice(nuclides=slice_nuclides)
slice_xs.tallies[tally_type] = tally_slice
# Assign sliced energy group structure to sliced MGXS
if groups:
new_group_edges = []
for group in groups:
group_edges = self.energy_groups.get_group_bounds(group)
new_group_edges.extend(group_edges)
new_group_edges = np.unique(new_group_edges)
slice_xs.energy_groups.group_edges = sorted(new_group_edges)
# Assign sliced nuclides to sliced MGXS
if nuclides:
slice_xs.nuclides = nuclides
slice_xs.sparse = self.sparse
return slice_xs
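    # Usage sketch (comment only): slicing an MGXS by nuclide and energy group.
    # Assumes a by-nuclide MGXS `capture` already loaded from a statepoint;
    # the nuclide name and group index are illustrative.
    #
    #   u238_fast = capture.get_slice(nuclides=['U238'], groups=[1])
    #   u238_fast.print_xs()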
def can_merge(self, other):
"""Determine if another MGXS can be merged with this one
If results have been loaded from a statepoint, then MGXS are only
        mergeable along one and only one of energy groups or nuclides.
Parameters
----------
other : openmc.mgxs.MGXS
MGXS to check for merging
"""
if not isinstance(other, type(self)):
return False
# Compare reaction type, energy groups, nuclides, domain type
if self.rxn_type != other.rxn_type:
return False
elif not self.energy_groups.can_merge(other.energy_groups):
return False
elif self.by_nuclide != other.by_nuclide:
return False
elif self.domain_type != other.domain_type:
return False
elif 'distribcell' not in self.domain_type and self.domain != other.domain:
return False
elif not self.xs_tally.can_merge(other.xs_tally):
return False
elif not self.rxn_rate_tally.can_merge(other.rxn_rate_tally):
return False
# If all conditionals pass then MGXS are mergeable
return True
def merge(self, other):
"""Merge another MGXS with this one
MGXS are only mergeable if their energy groups and nuclides are either
identical or mutually exclusive. If results have been loaded from a
statepoint, then MGXS are only mergeable along one and only one of
energy groups or nuclides.
Parameters
----------
other : openmc.mgxs.MGXS
MGXS to merge with this one
Returns
-------
merged_mgxs : openmc.mgxs.MGXS
Merged MGXS
"""
if not self.can_merge(other):
raise ValueError('Unable to merge MGXS')
# Create deep copy of tally to return as merged tally
merged_mgxs = copy.deepcopy(self)
merged_mgxs._derived = True
# Merge energy groups
if self.energy_groups != other.energy_groups:
merged_groups = self.energy_groups.merge(other.energy_groups)
merged_mgxs.energy_groups = merged_groups
# Merge nuclides
if self.nuclides != other.nuclides:
# The nuclides must be mutually exclusive
for nuclide in self.nuclides:
if nuclide in other.nuclides:
msg = 'Unable to merge MGXS with shared nuclides'
raise ValueError(msg)
# Concatenate lists of nuclides for the merged MGXS
merged_mgxs.nuclides = self.nuclides + other.nuclides
# Null base tallies but merge reaction rate and cross section tallies
merged_mgxs._tallies = OrderedDict()
merged_mgxs._rxn_rate_tally = self.rxn_rate_tally.merge(other.rxn_rate_tally)
merged_mgxs._xs_tally = self.xs_tally.merge(other.xs_tally)
return merged_mgxs
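    # Usage sketch (comment only): merging two compatible MGXS objects.
    # Assumes `xs_fast` and `xs_thermal` were built with mutually exclusive
    # energy group structures on the same domain; names are hypothetical.
    #
    #   if xs_fast.can_merge(xs_thermal):
    #       xs_full = xs_fast.merge(xs_thermal)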
def print_xs(self, subdomains='all', nuclides='all', xs_type='macro'):
"""Print a string representation for the multi-group cross section.
Parameters
----------
subdomains : Iterable of Integral or 'all'
The subdomain IDs of the cross sections to include in the report.
Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the report. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will report the cross sections for all
nuclides in the spatial domain. The special string 'sum' will report
the cross sections summed over all nuclides. Defaults to 'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
"""
# Construct a collection of the subdomains to report
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral)
elif self.domain_type == 'distribcell':
            subdomains = np.arange(self.num_subdomains, dtype=int)
elif self.domain_type == 'mesh':
subdomains = list(self.domain.indices)
else:
subdomains = [self.domain.id]
# Construct a collection of the nuclides to report
if self.by_nuclide:
if nuclides == 'all':
nuclides = self.get_nuclides()
elif nuclides == 'sum':
nuclides = ['sum']
else:
cv.check_iterable_type('nuclides', nuclides, str)
else:
nuclides = ['sum']
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Build header for string with type and domain info
string = 'Multi-Group XS\n'
string += '{0: <16}=\t{1}\n'.format('\tReaction Type', self.rxn_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain Type', self.domain_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain ID', self.domain.id)
# Generate the header for an individual XS
xs_header = '\tCross Sections [{0}]:'.format(self.get_units(xs_type))
# If cross section data has not been computed, only print string header
if self.tallies is None:
print(string)
return
# Set polar/azimuthal bins
if self.num_polar > 1 or self.num_azimuthal > 1:
pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,
endpoint=True)
azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,
endpoint=True)
# Loop over all subdomains
for subdomain in subdomains:
if self.domain_type == 'distribcell' or self.domain_type == 'mesh':
string += '{0: <16}=\t{1}\n'.format('\tSubdomain', subdomain)
# Loop over all Nuclides
for nuclide in nuclides:
# Build header for nuclide type
if nuclide != 'sum':
string += '{0: <16}=\t{1}\n'.format('\tNuclide', nuclide)
# Build header for cross section type
string += '{0: <16}\n'.format(xs_header)
template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]:\t'
average_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='mean')
rel_err_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='rel_err')
rel_err_xs = rel_err_xs * 100.
if self.num_polar > 1 or self.num_azimuthal > 1:
# Loop over polar, azimuthal, and energy group ranges
for pol in range(len(pol_bins) - 1):
pol_low, pol_high = pol_bins[pol: pol + 2]
for azi in range(len(azi_bins) - 1):
azi_low, azi_high = azi_bins[azi: azi + 2]
string += '\t\tPolar Angle: [{0:5f} - {1:5f}]'.format(
pol_low, pol_high) + \
'\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(
azi_low, azi_high) + '\n'
for group in range(1, self.num_groups + 1):
bounds = \
self.energy_groups.get_group_bounds(group)
string += '\t' + template.format('', group,
bounds[0],
bounds[1])
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[pol, azi, group - 1],
rel_err_xs[pol, azi, group - 1])
string += '\n'
string += '\n'
else:
# Loop over energy groups
for group in range(1, self.num_groups + 1):
bounds = self.energy_groups.get_group_bounds(group)
string += template.format('', group, bounds[0],
bounds[1])
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[group - 1], rel_err_xs[group - 1])
string += '\n'
string += '\n'
string += '\n'
print(string)
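    # Usage sketch (comment only): printing a report for selected nuclides.
    # Assumes `fission` is a by-nuclide MGXS loaded from a statepoint.
    #
    #   fission.print_xs(nuclides=['U235', 'U238'], xs_type='micro')
    #   fission.print_xs(nuclides='sum')    # summed over all nuclides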
def build_hdf5_store(self, filename='mgxs.h5', directory='mgxs',
subdomains='all', nuclides='all',
xs_type='macro', row_column='inout', append=True,
libver='earliest'):
"""Export the multi-group cross section data to an HDF5 binary file.
This method constructs an HDF5 file which stores the multi-group
cross section data. The data is stored in a hierarchy of HDF5 groups
from the domain type, domain id, subdomain id (for distribcell domains),
nuclides and cross section type. Two datasets for the mean and standard
deviation are stored for each subdomain entry in the HDF5 file.
.. note:: This requires the h5py Python package.
Parameters
----------
filename : str
Filename for the HDF5 file. Defaults to 'mgxs.h5'.
directory : str
Directory for the HDF5 file. Defaults to 'mgxs'.
subdomains : Iterable of Integral or 'all'
The subdomain IDs of the cross sections to include in the report.
Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the report. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will report the cross sections for all
nuclides in the spatial domain. The special string 'sum' will report
the cross sections summed over all nuclides. Defaults to 'all'.
xs_type: {'macro', 'micro'}
Store the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
row_column: {'inout', 'outin'}
Store scattering matrices indexed first by incoming group and
second by outgoing group ('inout'), or vice versa ('outin').
Defaults to 'inout'.
append : bool
If true, appends to an existing HDF5 file with the same filename
directory (if one exists). Defaults to True.
libver : {'earliest', 'latest'}
Compatibility mode for the HDF5 file. 'latest' will produce files
that are less backwards compatible but have performance benefits.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
filename = os.path.join(directory, filename)
filename = filename.replace(' ', '-')
if append and os.path.isfile(filename):
xs_results = h5py.File(filename, 'a')
else:
xs_results = h5py.File(filename, 'w', libver=libver)
# Construct a collection of the subdomains to report
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral)
elif self.domain_type == 'distribcell':
            subdomains = np.arange(self.num_subdomains, dtype=int)
elif self.domain_type == 'sum(distribcell)':
domain_filter = self.xs_tally.find_filter('sum(distribcell)')
subdomains = domain_filter.bins
elif self.domain_type == 'mesh':
subdomains = list(self.domain.indices)
else:
subdomains = [self.domain.id]
# Construct a collection of the nuclides to report
if self.by_nuclide:
if nuclides == 'all':
nuclides = self.get_nuclides()
                densities = np.zeros(len(nuclides), dtype=float)
elif nuclides == 'sum':
nuclides = ['sum']
else:
cv.check_iterable_type('nuclides', nuclides, str)
else:
nuclides = ['sum']
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Create an HDF5 group within the file for the domain
domain_type_group = xs_results.require_group(self.domain_type)
domain_group = domain_type_group.require_group(str(self.domain.id))
# Determine number of digits to pad subdomain group keys
num_digits = len(str(self.num_subdomains))
# Create a separate HDF5 group for each subdomain
for subdomain in subdomains:
# Create an HDF5 group for the subdomain
if self.domain_type == 'distribcell':
                group_name = str(subdomain).zfill(num_digits)
subdomain_group = domain_group.require_group(group_name)
else:
subdomain_group = domain_group
# Create a separate HDF5 group for this cross section
rxn_group = subdomain_group.require_group(self.hdf5_key)
# Create a separate HDF5 group for each nuclide
for j, nuclide in enumerate(nuclides):
if nuclide != 'sum':
density = densities[j]
nuclide_group = rxn_group.require_group(nuclide)
nuclide_group.require_dataset('density', dtype=np.float64,
data=[density], shape=(1,))
else:
nuclide_group = rxn_group
# Extract the cross section for this subdomain and nuclide
average = self.get_xs(subdomains=[subdomain], nuclides=[nuclide],
xs_type=xs_type, value='mean',
row_column=row_column)
std_dev = self.get_xs(subdomains=[subdomain], nuclides=[nuclide],
xs_type=xs_type, value='std_dev',
row_column=row_column)
# Add MGXS results data to the HDF5 group
nuclide_group.require_dataset('average', dtype=np.float64,
shape=average.shape, data=average)
nuclide_group.require_dataset('std. dev.', dtype=np.float64,
shape=std_dev.shape, data=std_dev)
# Close the results HDF5 file
xs_results.close()
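    # Usage sketch (comment only): exporting to HDF5 and reading it back.
    # The filename, directory, and h5py inspection below are illustrative.
    #
    #   total.build_hdf5_store(filename='mgxs.h5', directory='mgxs',
    #                          xs_type='macro')
    #   import h5py
    #   with h5py.File('mgxs/mgxs.h5', 'r') as f:
    #       f.visit(print)   # walk the domain/subdomain/nuclide hierarchy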
def export_xs_data(self, filename='mgxs', directory='mgxs',
format='csv', groups='all', xs_type='macro'):
"""Export the multi-group cross section data to a file.
This method leverages the functionality in the Pandas library to export
the multi-group cross section data in a variety of output file formats
for storage and/or post-processing.
Parameters
----------
filename : str
Filename for the exported file. Defaults to 'mgxs'.
directory : str
Directory for the exported file. Defaults to 'mgxs'.
format : {'csv', 'excel', 'pickle', 'latex'}
The format for the exported data file. Defaults to 'csv'.
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
xs_type: {'macro', 'micro'}
Store the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
"""
cv.check_type('filename', filename, str)
cv.check_type('directory', directory, str)
cv.check_value('format', format, ['csv', 'excel', 'pickle', 'latex'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
filename = os.path.join(directory, filename)
filename = filename.replace(' ', '-')
# Get a Pandas DataFrame for the data
df = self.get_pandas_dataframe(groups=groups, xs_type=xs_type)
# Export the data using Pandas IO API
if format == 'csv':
df.to_csv(filename + '.csv', index=False)
elif format == 'excel':
if self.domain_type == 'mesh':
df.to_excel(filename + '.xls')
else:
df.to_excel(filename + '.xls', index=False)
elif format == 'pickle':
df.to_pickle(filename + '.pkl')
elif format == 'latex':
if self.domain_type == 'distribcell':
                msg = 'Unable to export distribcell multi-group cross section ' \
                      'data to a LaTeX table'
raise NotImplementedError(msg)
df.to_latex(filename + '.tex', bold_rows=True,
longtable=True, index=False)
# Surround LaTeX table with code needed to run pdflatex
with open(filename + '.tex', 'r') as original:
data = original.read()
with open(filename + '.tex', 'w') as modified:
modified.write(
'\\documentclass[preview, 12pt, border=1mm]{standalone}\n')
modified.write('\\usepackage{caption}\n')
modified.write('\\usepackage{longtable}\n')
modified.write('\\usepackage{booktabs}\n')
modified.write('\\begin{document}\n\n')
modified.write(data)
modified.write('\n\\end{document}')
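    # Usage sketch (comment only): exporting the underlying DataFrame to disk.
    # Assumes `total` is an MGXS loaded from a statepoint; pandas performs the
    # actual file I/O for each format.
    #
    #   total.export_xs_data(filename='total-xs', format='csv')
    #   total.export_xs_data(filename='total-xs', format='pickle')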
def get_pandas_dataframe(self, groups='all', nuclides='all',
xs_type='macro', paths=True):
"""Build a Pandas DataFrame for the MGXS data.
This method leverages :meth:`openmc.Tally.get_pandas_dataframe`, but
renames the columns with terminology appropriate for cross section data.
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the dataframe. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will include the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
include the cross sections summed over all nuclides. Defaults
to 'all'.
xs_type: {'macro', 'micro'}
Return macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
paths : bool, optional
Construct columns for distribcell tally filters (default is True).
The geometric information in the Summary object is embedded into
a Multi-index column with a geometric "path" to each distribcell
instance.
Returns
-------
pandas.DataFrame
A Pandas DataFrame for the cross section data.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
if not isinstance(groups, str):
cv.check_iterable_type('groups', groups, Integral)
if nuclides != 'all' and nuclides != 'sum':
cv.check_iterable_type('nuclides', nuclides, str)
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Get a Pandas DataFrame from the derived xs tally
if self.by_nuclide and nuclides == 'sum':
# Use tally summation to sum across all nuclides
xs_tally = self.xs_tally.summation(nuclides=self.get_nuclides())
df = xs_tally.get_pandas_dataframe(paths=paths)
# Remove nuclide column since it is homogeneous and redundant
if self.domain_type == 'mesh':
df.drop('sum(nuclide)', axis=1, level=0, inplace=True)
else:
df.drop('sum(nuclide)', axis=1, inplace=True)
# If the user requested a specific set of nuclides
elif self.by_nuclide and nuclides != 'all':
xs_tally = self.xs_tally.get_slice(nuclides=nuclides)
df = xs_tally.get_pandas_dataframe(paths=paths)
# If the user requested all nuclides, keep nuclide column in dataframe
else:
df = self.xs_tally.get_pandas_dataframe(paths=paths)
# Remove the score column since it is homogeneous and redundant
if self.domain_type == 'mesh':
df = df.drop('score', axis=1, level=0)
else:
df = df.drop('score', axis=1)
# Convert azimuthal, polar, energy in and energy out bin values in to
# bin indices
columns = self._df_convert_columns_to_bins(df)
# Select out those groups the user requested
if not isinstance(groups, str):
if 'group in' in df:
df = df[df['group in'].isin(groups)]
if 'group out' in df:
df = df[df['group out'].isin(groups)]
# If user requested micro cross sections, divide out the atom densities
if xs_type == 'micro' and self._divide_by_density:
if self.by_nuclide:
densities = self.get_nuclide_densities(nuclides)
else:
densities = self.get_nuclide_densities('sum')
densities = np.repeat(densities, len(self.rxn_rate_tally.scores))
tile_factor = int(df.shape[0] / len(densities))
df['mean'] /= np.tile(densities, tile_factor)
df['std. dev.'] /= np.tile(densities, tile_factor)
# Replace NaNs by zeros (happens if nuclide density is zero)
df['mean'].replace(np.nan, 0.0, inplace=True)
df['std. dev.'].replace(np.nan, 0.0, inplace=True)
# Sort the dataframe by domain type id (e.g., distribcell id) and
# energy groups such that data is from fast to thermal
if self.domain_type == 'mesh':
mesh_str = 'mesh {0}'.format(self.domain.id)
df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'y'),
(mesh_str, 'z')] + columns, inplace=True)
else:
df.sort_values(by=[self.domain_type] + columns, inplace=True)
return df
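    # Usage sketch (comment only): post-processing with pandas.
    # Assumes `scatter` is a by-nuclide MGXS loaded from a statepoint; the
    # column names follow the renaming performed by this method.
    #
    #   df = scatter.get_pandas_dataframe()
    #   fast = df[df['group in'] == 1]
    #   print(fast[['nuclide', 'mean', 'std. dev.']].head())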
def get_units(self, xs_type='macro'):
"""This method returns the units of a MGXS based on a desired xs_type.
Parameters
----------
xs_type: {'macro', 'micro'}
Return the macro or micro cross section units.
Defaults to 'macro'.
Returns
-------
str
A string representing the units of the MGXS.
"""
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
return 'cm^-1' if xs_type == 'macro' else 'barns'
class MatrixMGXS(MGXS):
"""An abstract multi-group cross section for some energy group structure
within some spatial domain. This class is specifically intended for
cross sections which depend on both the incoming and outgoing energy groups
and are therefore represented by matrices. Examples of this include the
scattering and nu-fission matrices.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations.
.. note:: Users should instantiate the subclasses of this abstract class.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file) and the number of mesh cells for
'mesh' domain types.
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
@property
def _dont_squeeze(self):
"""Create a tuple of axes which should not be removed during the get_xs
process
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
return (0, 1, 3, 4)
else:
return (1, 2)
@property
def filters(self):
# Create the non-domain specific Filters for the Tallies
group_edges = self.energy_groups.group_edges
energy = openmc.EnergyFilter(group_edges)
energyout = openmc.EnergyoutFilter(group_edges)
filters = [[energy], [energy, energyout]]
return self._add_angle_filters(filters)
def get_xs(self, in_groups='all', out_groups='all', subdomains='all',
nuclides='all', xs_type='macro', order_groups='increasing',
row_column='inout', value='mean', squeeze=True, **kwargs):
"""Returns an array of multi-group cross sections.
This method constructs a 4D NumPy array for the requested
multi-group cross section data for one or more subdomains
(1st dimension), energy groups in (2nd dimension), energy groups out
(3rd dimension), and nuclides (4th dimension).
Parameters
----------
in_groups : Iterable of Integral or 'all'
Incoming energy groups of interest. Defaults to 'all'.
out_groups : Iterable of Integral or 'all'
Outgoing energy groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
A list of nuclide name strings (e.g., ['U235', 'U238']). The
special string 'all' will return the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
return the cross section summed over all nuclides. Defaults to
'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
row_column: {'inout', 'outin'}
Return the cross section indexed first by incoming group and
second by outgoing group ('inout'), or vice versa ('outin').
Defaults to 'inout'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the multi-group cross section indexed in the order
each group and subdomain is listed in the parameters.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# FIXME: Unable to get microscopic xs for mesh domain because the mesh
# cells do not know the nuclide densities in each mesh cell.
if self.domain_type == 'mesh' and xs_type == 'micro':
msg = 'Unable to get micro xs for mesh domain since the mesh ' \
'cells do not know the nuclide densities in each mesh cell.'
raise ValueError(msg)
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral,
max_depth=3)
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
subdomain_bins = []
for subdomain in subdomains:
subdomain_bins.append(subdomain)
filter_bins.append(tuple(subdomain_bins))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(in_groups, str):
cv.check_iterable_type('groups', in_groups, Integral)
filters.append(openmc.EnergyFilter)
energy_bins = []
for group in in_groups:
energy_bins.append((self.energy_groups.get_group_bounds(group),))
filter_bins.append(tuple(energy_bins))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(out_groups, str):
cv.check_iterable_type('groups', out_groups, Integral)
for group in out_groups:
filters.append(openmc.EnergyoutFilter)
filter_bins.append((
self.energy_groups.get_group_bounds(group),))
# Construct a collection of the nuclides to retrieve from the xs tally
if self.by_nuclide:
if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:
query_nuclides = self.get_nuclides()
else:
query_nuclides = nuclides
else:
query_nuclides = ['total']
# Use tally summation if user requested the sum for all nuclides
if nuclides == 'sum' or nuclides == ['sum']:
xs_tally = self.xs_tally.summation(nuclides=query_nuclides)
xs = xs_tally.get_values(filters=filters, filter_bins=filter_bins,
value=value)
else:
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins,
nuclides=query_nuclides, value=value)
# Divide by atom number densities for microscopic cross sections
if xs_type == 'micro' and self._divide_by_density:
if self.by_nuclide:
densities = self.get_nuclide_densities(nuclides)
else:
densities = self.get_nuclide_densities('sum')
if value == 'mean' or value == 'std_dev':
xs /= densities[np.newaxis, :, np.newaxis]
# Eliminate the trivial score dimension
xs = np.squeeze(xs, axis=len(xs.shape) - 1)
xs = np.nan_to_num(xs)
if in_groups == 'all':
num_in_groups = self.num_groups
else:
num_in_groups = len(in_groups)
if out_groups == 'all':
num_out_groups = self.num_groups
else:
num_out_groups = len(out_groups)
# Reshape tally data array with separate axes for domain and energy
        # Accommodate the polar and azimuthal bins if needed
num_subdomains = int(xs.shape[0] / (num_in_groups * num_out_groups *
self.num_polar *
self.num_azimuthal))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
num_in_groups, num_out_groups)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Transpose the matrix if requested by user
if row_column == 'outin':
xs = np.swapaxes(xs, 3, 4)
else:
new_shape = (num_subdomains, num_in_groups, num_out_groups)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Transpose the matrix if requested by user
if row_column == 'outin':
xs = np.swapaxes(xs, 1, 2)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
xs = xs[..., ::-1, ::-1, :]
if squeeze:
# We want to squeeze out everything but the polar, azimuthal,
# and in/out energy group data.
xs = self._squeeze_xs(xs)
return xs
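    # Usage sketch (comment only): extracting blocks of a matrix cross section.
    # Assumes `scatter_matrix` is an instance of a MatrixMGXS subclass loaded
    # from a statepoint; the group indices are illustrative.
    #
    #   # Group 2 -> group 1 element for the whole domain
    #   xs_21 = scatter_matrix.get_xs(in_groups=[2], out_groups=[1])
    #   # Full matrix indexed first by outgoing group
    #   full = scatter_matrix.get_xs(row_column='outin')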
def get_slice(self, nuclides=[], in_groups=[], out_groups=[]):
"""Build a sliced MatrixMGXS object for the specified nuclides and
energy groups.
This method constructs a new MGXS to encapsulate a subset of the data
represented by this MGXS. The subset of data to include in the tally
slice is determined by the nuclides and energy groups specified in
the input parameters.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
in_groups : list of int
A list of incoming energy group indices starting at 1 for the high
energies (e.g., [1, 2, 3]; default is [])
out_groups : list of int
A list of outgoing energy group indices starting at 1 for the high
energies (e.g., [1, 2, 3]; default is [])
Returns
-------
openmc.mgxs.MatrixMGXS
A new MatrixMGXS object which encapsulates the subset of data
requested for the nuclide(s) and/or energy group(s) requested in
the parameters.
"""
# Call super class method and null out derived tallies
slice_xs = super().get_slice(nuclides, in_groups)
slice_xs._rxn_rate_tally = None
slice_xs._xs_tally = None
# Slice outgoing energy groups if needed
if len(out_groups) != 0:
filter_bins = []
for group in out_groups:
group_bounds = self.energy_groups.get_group_bounds(group)
filter_bins.append(group_bounds)
filter_bins = [tuple(filter_bins)]
# Slice each of the tallies across energyout groups
for tally_type, tally in slice_xs.tallies.items():
if tally.contains_filter(openmc.EnergyoutFilter):
tally_slice = tally.get_slice(
filters=[openmc.EnergyoutFilter],
filter_bins=filter_bins)
slice_xs.tallies[tally_type] = tally_slice
slice_xs.sparse = self.sparse
return slice_xs
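    # Usage sketch (comment only): slicing a matrix MGXS along both energy axes.
    # Assumes `scatter_matrix` is an instance of a MatrixMGXS subclass loaded
    # from a statepoint; the nuclide and group indices are illustrative.
    #
    #   fast_block = scatter_matrix.get_slice(nuclides=['U238'],
    #                                         in_groups=[1],
    #                                         out_groups=[1, 2])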
def print_xs(self, subdomains='all', nuclides='all', xs_type='macro'):
"""Prints a string representation for the multi-group cross section.
Parameters
----------
subdomains : Iterable of Integral or 'all'
The subdomain IDs of the cross sections to include in the report.
Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the report. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will report the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
report the cross sections summed over all nuclides. Defaults to
'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
"""
# Construct a collection of the subdomains to report
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral)
elif self.domain_type == 'distribcell':
            subdomains = np.arange(self.num_subdomains, dtype=int)
elif self.domain_type == 'mesh':
subdomains = list(self.domain.indices)
else:
subdomains = [self.domain.id]
# Construct a collection of the nuclides to report
if self.by_nuclide:
if nuclides == 'all':
nuclides = self.get_nuclides()
            elif nuclides == 'sum':
nuclides = ['sum']
else:
cv.check_iterable_type('nuclides', nuclides, str)
else:
nuclides = ['sum']
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Build header for string with type and domain info
string = 'Multi-Group XS\n'
string += '{0: <16}=\t{1}\n'.format('\tReaction Type', self.rxn_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain Type', self.domain_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain ID', self.domain.id)
# Generate the header for an individual XS
xs_header = '\tCross Sections [{0}]:'.format(self.get_units(xs_type))
# If cross section data has not been computed, only print string header
if self.tallies is None:
print(string)
return
string += '{0: <16}\n'.format('\tEnergy Groups:')
template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]\n'
# Loop over energy groups ranges
for group in range(1, self.num_groups + 1):
bounds = self.energy_groups.get_group_bounds(group)
string += template.format('', group, bounds[0], bounds[1])
# Set polar and azimuthal bins if necessary
if self.num_polar > 1 or self.num_azimuthal > 1:
pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,
endpoint=True)
azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,
endpoint=True)
# Loop over all subdomains
for subdomain in subdomains:
if self.domain_type == 'distribcell' or self.domain_type == 'mesh':
string += '{0: <16}=\t{1}\n'.format('\tSubdomain', subdomain)
# Loop over all Nuclides
for nuclide in nuclides:
# Build header for nuclide type
                if nuclide != 'sum':
string += '{0: <16}=\t{1}\n'.format('\tNuclide', nuclide)
# Build header for cross section type
string += '{0: <16}\n'.format(xs_header)
template = '{0: <12}Group {1} -> Group {2}:\t\t'
average_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='mean')
rel_err_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='rel_err')
rel_err_xs = rel_err_xs * 100.
if self.num_polar > 1 or self.num_azimuthal > 1:
# Loop over polar, azi, and in/out energy group ranges
for pol in range(len(pol_bins) - 1):
pol_low, pol_high = pol_bins[pol: pol + 2]
for azi in range(len(azi_bins) - 1):
azi_low, azi_high = azi_bins[azi: azi + 2]
string += '\t\tPolar Angle: [{0:5f} - {1:5f}]'.format(
pol_low, pol_high) + \
'\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(
azi_low, azi_high) + '\n'
for in_group in range(1, self.num_groups + 1):
for out_group in range(1, self.num_groups + 1):
string += '\t' + template.format('',
in_group,
out_group)
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[pol, azi, in_group - 1,
out_group - 1],
rel_err_xs[pol, azi, in_group - 1,
out_group - 1])
string += '\n'
string += '\n'
string += '\n'
else:
# Loop over incoming/outgoing energy groups ranges
for in_group in range(1, self.num_groups + 1):
for out_group in range(1, self.num_groups + 1):
string += template.format('', in_group, out_group)
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[in_group - 1, out_group - 1],
rel_err_xs[in_group - 1, out_group - 1])
string += '\n'
string += '\n'
string += '\n'
string += '\n'
print(string)
class TotalXS(MGXS):
r"""A total multi-group cross section.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group total cross sections for multi-group neutronics calculations. At
a minimum, one needs to set the :attr:`TotalXS.energy_groups` and
:attr:`TotalXS.domain` properties. Tallies for the flux and appropriate
reaction rates over the specified domain are generated automatically via the
:attr:`TotalXS.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`TotalXS.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
total cross section is calculated as:
.. math::
\frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
\sigma_t (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`TotalXS.tally_keys` property and values
are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1, num_azimuthal=1):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self._rxn_type = 'total'
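    # Usage sketch (comment only): input generation with a TotalXS.
    # `fuel_cell`, the group edges, and the Tallies handling below are
    # illustrative assumptions about a typical workflow.
    #
    #   groups = openmc.mgxs.EnergyGroups([0., 0.625, 2.0e7])
    #   total = openmc.mgxs.TotalXS(domain=fuel_cell, domain_type='cell',
    #                               groups=groups)
    #   tallies = openmc.Tallies()
    #   tallies += total.tallies.values()
    #   tallies.export_to_xml()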
class TransportXS(MGXS):
r"""A transport-corrected total multi-group cross section.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`TransportXS.energy_groups` and
:attr:`TransportXS.domain` properties. Tallies for the flux and appropriate
reaction rates over the specified domain are generated automatically via the
:attr:`TransportXS.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`TransportXS.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
transport-corrected total cross section is calculated as:
.. math::
\begin{aligned}
\langle \sigma_t \phi \rangle &= \int_{r \in V} dr \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \sigma_t (r, E) \psi
(r, E, \Omega) \\
\langle \sigma_{s1} \phi \rangle &= \int_{r \in V} dr
\int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \int_{4\pi}
d\Omega' \int_0^\infty dE' \int_{-1}^1 d\mu \; \mu \sigma_s
(r, E' \rightarrow E, \Omega' \cdot \Omega)
\phi (r, E', \Omega) \\
\langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega) \\
\sigma_{tr} &= \frac{\langle \sigma_t \phi \rangle - \langle \sigma_{s1}
\phi \rangle}{\langle \phi \rangle}
\end{aligned}
To incorporate the effect of scattering multiplication in the above
relation, the `nu` parameter can be set to `True`.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
nu : bool
If True, the cross section data will include neutron multiplication;
defaults to False.
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
nu : bool
If True, the cross section data will include neutron multiplication
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`TransportXS.tally_keys` property and
values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, groups=None, nu=False,
by_nuclide=False, name='', num_polar=1, num_azimuthal=1):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
# Use tracklength estimators for the total MGXS term, and
# analog estimators for the transport correction term
self._estimator = ['tracklength', 'tracklength', 'analog', 'analog']
self._valid_estimators = ['analog']
self.nu = nu
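    # Usage sketch (comment only): a transport-corrected total cross section.
    # TransportXS needs analog estimators for the P1 scattering term, so its
    # tallies differ from TotalXS. Assumes `fuel_cell` is an openmc.Cell,
    # `groups` an openmc.mgxs.EnergyGroups, and the statepoint name is
    # illustrative.
    #
    #   transport = openmc.mgxs.TransportXS(domain=fuel_cell,
    #                                       domain_type='cell',
    #                                       groups=groups, nu=False)
    #   transport.load_from_statepoint(openmc.StatePoint('statepoint.100.h5'))
    #   sigma_tr = transport.get_xs()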
def __deepcopy__(self, memo):
clone = super().__deepcopy__(memo)
clone._nu = self.nu
return clone
@property
def scores(self):
if not self.nu:
return ['flux', 'total', 'flux', 'scatter']
else:
return ['flux', 'total', 'flux', 'nu-scatter']
@property
def tally_keys(self):
return ['flux (tracklength)', 'total', 'flux (analog)', 'scatter-1']
@property
def filters(self):
group_edges = self.energy_groups.group_edges
energy_filter = openmc.EnergyFilter(group_edges)
energyout_filter = openmc.EnergyoutFilter(group_edges)
p1_filter = openmc.LegendreFilter(1)
filters = [[energy_filter], [energy_filter],
[energy_filter], [energyout_filter, p1_filter]]
return self._add_angle_filters(filters)
@property
def rxn_rate_tally(self):
if self._rxn_rate_tally is None:
# Switch EnergyoutFilter to EnergyFilter.
p1_tally = self.tallies['scatter-1']
old_filt = p1_tally.filters[-2]
new_filt = openmc.EnergyFilter(old_filt.values)
p1_tally.filters[-2] = new_filt
# Slice Legendre expansion filter and change name of score
p1_tally = p1_tally.get_slice(filters=[openmc.LegendreFilter],
filter_bins=[('P1',)],
squeeze=True)
p1_tally._scores = ['scatter-1']
self._rxn_rate_tally = self.tallies['total'] - p1_tally
self._rxn_rate_tally.sparse = self.sparse
return self._rxn_rate_tally
@property
def xs_tally(self):
if self._xs_tally is None:
if self.tallies is None:
msg = 'Unable to get xs_tally since tallies have ' \
'not been loaded from a statepoint'
raise ValueError(msg)
# Switch EnergyoutFilter to EnergyFilter.
p1_tally = self.tallies['scatter-1']
old_filt = p1_tally.filters[-2]
new_filt = openmc.EnergyFilter(old_filt.values)
p1_tally.filters[-2] = new_filt
# Slice Legendre expansion filter and change name of score
p1_tally = p1_tally.get_slice(filters=[openmc.LegendreFilter],
filter_bins=[('P1',)],
squeeze=True)
p1_tally._scores = ['scatter-1']
# Compute total cross section
total_xs = self.tallies['total'] / self.tallies['flux (tracklength)']
# Compute transport correction term
trans_corr = p1_tally / self.tallies['flux (analog)']
# Compute the transport-corrected total cross section
self._xs_tally = total_xs - trans_corr
self._compute_xs()
return self._xs_tally
@property
def nu(self):
return self._nu
@nu.setter
def nu(self, nu):
cv.check_type('nu', nu, bool)
self._nu = nu
if not nu:
self._rxn_type = 'transport'
else:
self._rxn_type = 'nu-transport'
class DiffusionCoefficient(TransportXS):
r"""A diffusion coefficient multi-group cross section.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`DiffusionCoefficient.energy_groups` and
:attr:`DiffusionCoefficient.domain` properties. Tallies for the flux and appropriate
reaction rates over the specified domain are generated automatically via the
:attr:`DiffusionCoefficient.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`DiffusionCoefficient.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
diffusion coefficient is calculated as:
.. math::
\begin{aligned}
\langle \sigma_t \phi \rangle &= \int_{r \in V} dr \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \sigma_t (r, E) \psi
(r, E, \Omega) \\
\langle \sigma_{s1} \phi \rangle &= \int_{r \in V} dr
\int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \int_{4\pi}
d\Omega' \int_0^\infty dE' \int_{-1}^1 d\mu \; \mu \sigma_s
(r, E' \rightarrow E, \Omega' \cdot \Omega)
\phi (r, E', \Omega) \\
\langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega) \\
\sigma_{tr} &= \frac{\langle \sigma_t \phi \rangle - \langle \sigma_{s1}
\phi \rangle}{\langle \phi \rangle} \\
        D &= \frac{1}{3 \sigma_{tr}}
\end{aligned}
To incorporate the effect of scattering multiplication in the above
relation, the `nu` parameter can be set to `True`.
.. versionadded:: 0.12.1
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
nu : bool
If True, the cross section data will include neutron multiplication;
defaults to False.
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
nu : bool
If True, the cross section data will include neutron multiplication
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`TransportXS.tally_keys` property and
values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, groups=None, nu=False,
by_nuclide=False, name='', num_polar=1, num_azimuthal=1):
        super().__init__(domain, domain_type, groups, nu, by_nuclide, name,
                         num_polar, num_azimuthal)
if not nu:
self._rxn_type = 'diffusion-coefficient'
else:
self._rxn_type = 'nu-diffusion-coefficient'
@property
def rxn_rate_tally(self):
if self._rxn_rate_tally is None:
# Switch EnergyoutFilter to EnergyFilter
p1_tally = self.tallies['scatter-1']
old_filt = p1_tally.filters[-2]
new_filt = openmc.EnergyFilter(old_filt.values)
p1_tally.filters[-2] = new_filt
# Slice Legendre expansion filter and change name of score
p1_tally = p1_tally.get_slice(filters=[openmc.LegendreFilter],
filter_bins=[('P1',)],
squeeze=True)
p1_tally._scores = ['scatter-1']
# Compute total cross section
total_xs = self.tallies['total'] / self.tallies['flux (tracklength)']
# Compute transport correction term
            trans_corr = p1_tally / self.tallies['flux (analog)']
# Compute the diffusion coefficient
transport = total_xs - trans_corr
dif_coef = transport**(-1) / 3.0
self._rxn_rate_tally = dif_coef * self.tallies['flux (tracklength)']
self._rxn_rate_tally.sparse = self.sparse
return self._rxn_rate_tally
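    # Illustrative sketch, not part of the original source: groupwise, the
    # tally arithmetic above reduces to D = 1 / (3 * (sigma_t - sigma_s1)).
    # With hypothetical one-group values (in 1/cm):
    #
    #   >>> sigma_t, sigma_s1 = 0.650, 0.150
    #   >>> sigma_tr = sigma_t - sigma_s1
    #   >>> round(1.0 / (3.0 * sigma_tr), 4)
    #   0.6667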
@property
def xs_tally(self):
if self._xs_tally is None:
if self.tallies is None:
msg = 'Unable to get xs_tally since tallies have ' \
'not been loaded from a statepoint'
raise ValueError(msg)
self._xs_tally = self.rxn_rate_tally / self.tallies['flux (tracklength)']
self._compute_xs()
return self._xs_tally
def get_condensed_xs(self, coarse_groups):
"""Construct an energy-condensed version of this cross section.
Parameters
----------
coarse_groups : openmc.mgxs.EnergyGroups
The coarse energy group structure of interest
Returns
-------
MGXS
A new MGXS condensed to the group structure of interest
"""
cv.check_type('coarse_groups', coarse_groups, EnergyGroups)
cv.check_less_than('coarse groups', coarse_groups.num_groups,
self.num_groups, equality=True)
cv.check_value('upper coarse energy', coarse_groups.group_edges[-1],
[self.energy_groups.group_edges[-1]])
cv.check_value('lower coarse energy', coarse_groups.group_edges[0],
[self.energy_groups.group_edges[0]])
# Clone this MGXS to initialize the condensed version
condensed_xs = copy.deepcopy(self)
if self._rxn_rate_tally is None:
p1_tally = self.tallies['scatter-1']
old_filt = p1_tally.filters[-2]
new_filt = openmc.EnergyFilter(old_filt.values)
p1_tally.filters[-2] = new_filt
# Slice Legendre expansion filter and change name of score
p1_tally = p1_tally.get_slice(filters=[openmc.LegendreFilter],
filter_bins=[('P1',)],
squeeze=True)
p1_tally._scores = ['scatter-1']
total = self.tallies['total'] / self.tallies['flux (tracklength)']
trans_corr = p1_tally / self.tallies['flux (analog)']
transport = (total - trans_corr)
dif_coef = transport**(-1) / 3.0
dif_coef *= self.tallies['flux (tracklength)']
else:
dif_coef = self.rxn_rate_tally
flux_tally = condensed_xs.tallies['flux (tracklength)']
condensed_xs._tallies = OrderedDict()
condensed_xs._tallies[self._rxn_type] = dif_coef
condensed_xs._tallies['flux (tracklength)'] = flux_tally
condensed_xs._rxn_rate_tally = dif_coef
condensed_xs._xs_tally = None
condensed_xs._sparse = False
condensed_xs._energy_groups = coarse_groups
# Build energy indices to sum across
energy_indices = []
for group in range(coarse_groups.num_groups, 0, -1):
low, high = coarse_groups.get_group_bounds(group)
low_index = np.where(self.energy_groups.group_edges == low)[0][0]
energy_indices.append(low_index)
fine_edges = self.energy_groups.group_edges
# Condense each of the tallies to the coarse group structure
for tally in condensed_xs.tallies.values():
# Make condensed tally derived and null out sum, sum_sq
tally._derived = True
tally._sum = None
tally._sum_sq = None
# Get tally data arrays reshaped with one dimension per filter
mean = tally.get_reshaped_data(value='mean')
std_dev = tally.get_reshaped_data(value='std_dev')
# Sum across all applicable fine energy group filters
for i, tally_filter in enumerate(tally.filters):
if not isinstance(tally_filter,
(openmc.EnergyFilter,
openmc.EnergyoutFilter)):
continue
elif len(tally_filter.bins) != len(fine_edges):
continue
elif not np.allclose(tally_filter.bins, fine_edges):
continue
else:
tally_filter.bins = coarse_groups.group_edges
mean = np.add.reduceat(mean, energy_indices, axis=i)
std_dev = np.add.reduceat(std_dev**2, energy_indices,
axis=i)
std_dev = np.sqrt(std_dev)
# Reshape condensed data arrays with one dimension for all filters
mean = np.reshape(mean, tally.shape)
std_dev = np.reshape(std_dev, tally.shape)
# Override tally's data with the new condensed data
tally._mean = mean
tally._std_dev = std_dev
# Compute the energy condensed multi-group cross section
condensed_xs.sparse = self.sparse
return condensed_xs
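# Illustrative sketch, not part of the original source: the condensation above
# uses ``np.add.reduceat`` to collapse fine-group tally means into coarse
# groups, summing from the start index of each coarse group.  For example,
# collapsing four fine groups into two coarse groups:
#
#   >>> import numpy as np
#   >>> fine_means = np.array([1.0, 2.0, 3.0, 4.0])
#   >>> energy_indices = [0, 2]
#   >>> np.add.reduceat(fine_means, energy_indices)
#   array([3., 7.])
#
# Standard deviations are combined in quadrature the same way, i.e.
# ``np.sqrt(np.add.reduceat(std_dev**2, energy_indices))``.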
class AbsorptionXS(MGXS):
r"""An absorption multi-group cross section.
Absorption is defined as all reactions that do not produce secondary
neutrons (disappearance) plus fission reactions.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group absorption cross sections for multi-group neutronics
calculations. At a minimum, one needs to set the
:attr:`AbsorptionXS.energy_groups` and :attr:`AbsorptionXS.domain`
properties. Tallies for the flux and appropriate reaction rates over the
specified domain are generated automatically via the
:attr:`AbsorptionXS.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`AbsorptionXS.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
absorption cross section is calculated as:
.. math::
\frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
\sigma_a (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`AbsorptionXS.tally_keys` property and
values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file) and the number of mesh cells for
'mesh' domain types.
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1, num_azimuthal=1):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self._rxn_type = 'absorption'
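# Illustrative usage sketch, not part of the original source (``fuel_cell``
# and the statepoint filename are hypothetical): a typical AbsorptionXS
# workflow generates tallies, runs OpenMC, then post-processes a statepoint.
#
#   >>> groups = openmc.mgxs.EnergyGroups([0.0, 0.625, 2.0e7])
#   >>> abs_xs = openmc.mgxs.AbsorptionXS(domain=fuel_cell,
#   ...                                   domain_type='cell', groups=groups)
#   >>> tallies = openmc.Tallies(abs_xs.tallies.values())
#   >>> tallies.export_to_xml()
#   >>> # ... run OpenMC ...
#   >>> sp = openmc.StatePoint('statepoint.100.h5')
#   >>> abs_xs.load_from_statepoint(sp)
#   >>> abs_xs.get_xs()  # spatially homogenized, energy-condensed values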
class CaptureXS(MGXS):
r"""A capture multi-group cross section.
The neutron capture reaction rate is defined as the difference between
OpenMC's 'absorption' and 'fission' reaction rate score types. This includes
not only radiative capture, but all forms of neutron disappearance aside
from fission (i.e., MT > 100).
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group capture cross sections for multi-group neutronics
calculations. At a minimum, one needs to set the
:attr:`CaptureXS.energy_groups` and :attr:`CaptureXS.domain`
properties. Tallies for the flux and appropriate reaction rates over the
specified domain are generated automatically via the
:attr:`CaptureXS.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`CaptureXS.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
capture cross section is calculated as:
.. math::
\frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
\left [ \sigma_a (r, E) \psi (r, E, \Omega) - \sigma_f (r, E) \psi (r, E,
\Omega) \right ]}{\int_{r \in V} dr \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`CaptureXS.tally_keys` property and
values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1, num_azimuthal=1):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self._rxn_type = 'capture'
@property
def scores(self):
return ['flux', 'absorption', 'fission']
@property
def rxn_rate_tally(self):
if self._rxn_rate_tally is None:
self._rxn_rate_tally = \
self.tallies['absorption'] - self.tallies['fission']
self._rxn_rate_tally.sparse = self.sparse
return self._rxn_rate_tally
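# Illustrative sketch, not part of the original source: the derived tally
# above means that, groupwise, the capture rate is simply the absorption rate
# minus the fission rate, e.g. with hypothetical reaction rates:
#
#   >>> absorption_rate, fission_rate = 0.030, 0.012
#   >>> round(absorption_rate - fission_rate, 3)
#   0.018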
class FissionXS(MGXS):
r"""A fission multi-group cross section.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group fission cross sections for multi-group neutronics
calculations. At a minimum, one needs to set the
:attr:`FissionXS.energy_groups` and :attr:`FissionXS.domain`
properties. Tallies for the flux and appropriate reaction rates over the
specified domain are generated automatically via the
:attr:`FissionXS.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`FissionXS.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
fission cross section is calculated as:
.. math::
\frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
\sigma_f (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.
To incorporate the effect of neutron multiplication in the above
relation, the `nu` parameter can be set to `True`.
This class can also be used to gather a prompt-nu-fission cross section
(which only includes the contributions from prompt neutrons). This is
accomplished by setting the :attr:`FissionXS.prompt` attribute to `True`.
Since the prompt-nu-fission cross section requires neutron multiplication,
the `nu` parameter will automatically be set to `True` if `prompt` is also
`True`.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
nu : bool
If True, the cross section data will include neutron multiplication;
defaults to False
prompt : bool
        If true, computes cross sections which only include prompt neutrons;
        defaults to False, which includes both prompt and delayed neutrons.
        Setting this to True will also set nu to True.
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
nu : bool
If True, the cross section data will include neutron multiplication
prompt : bool
        If true, computes cross sections which only include prompt neutrons
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`FissionXS.tally_keys` property and
values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, groups=None, nu=False,
prompt=False, by_nuclide=False, name='', num_polar=1,
num_azimuthal=1):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self._nu = False
self._prompt = False
self.nu = nu
self.prompt = prompt
def __deepcopy__(self, memo):
clone = super().__deepcopy__(memo)
clone._nu = self.nu
clone._prompt = self.prompt
return clone
@property
def nu(self):
return self._nu
@property
def prompt(self):
return self._prompt
@nu.setter
def nu(self, nu):
cv.check_type('nu', nu, bool)
self._nu = nu
if not self.prompt:
if not self.nu:
self._rxn_type = 'fission'
else:
self._rxn_type = 'nu-fission'
else:
self._rxn_type = 'prompt-nu-fission'
@prompt.setter
def prompt(self, prompt):
cv.check_type('prompt', prompt, bool)
self._prompt = prompt
if not self.prompt:
if not self.nu:
self._rxn_type = 'fission'
else:
self._rxn_type = 'nu-fission'
else:
self._rxn_type = 'prompt-nu-fission'
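# Illustrative sketch, not part of the original source: the nu/prompt setters
# in FissionXS above select the underlying tally score as follows.
#
#   >>> openmc.mgxs.FissionXS().rxn_type
#   'fission'
#   >>> openmc.mgxs.FissionXS(nu=True).rxn_type
#   'nu-fission'
#   >>> openmc.mgxs.FissionXS(prompt=True).rxn_type
#   'prompt-nu-fission'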
class KappaFissionXS(MGXS):
r"""A recoverable fission energy production rate multi-group cross section.
The recoverable energy per fission, :math:`\kappa`, is defined as the
fission product kinetic energy, prompt and delayed neutron kinetic energies,
prompt and delayed :math:`\gamma`-ray total energies, and the total energy
released by the delayed :math:`\beta` particles. The neutrino energy does
not contribute to this response. The prompt and delayed :math:`\gamma`-rays
are assumed to deposit their energy locally.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`KappaFissionXS.energy_groups` and
:attr:`KappaFissionXS.domain` properties. Tallies for the flux and appropriate
reaction rates over the specified domain are generated automatically via the
:attr:`KappaFissionXS.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`KappaFissionXS.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
recoverable fission energy production rate cross section is calculated as:
.. math::
\frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
\kappa\sigma_f (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`KappaFissionXS.tally_keys` property and
values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1, num_azimuthal=1):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self._rxn_type = 'kappa-fission'
class ScatterXS(MGXS):
r"""A scattering multi-group cross section.
The scattering cross section is defined as the difference between the total
and absorption cross sections.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`ScatterXS.energy_groups` and
:attr:`ScatterXS.domain` properties. Tallies for the flux and
appropriate reaction rates over the specified domain are generated
automatically via the :attr:`ScatterXS.tallies` property, which can
then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`ScatterXS.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
scattering cross section is calculated as:
.. math::
\frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
\left [ \sigma_t (r, E) \psi (r, E, \Omega) - \sigma_a (r, E) \psi (r, E,
\Omega) \right ]}{\int_{r \in V} dr \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.
To incorporate the effect of scattering multiplication from (n,xn)
reactions in the above relation, the `nu` parameter can be set to `True`.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
nu : bool
If True, the cross section data will include neutron multiplication;
defaults to False
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
nu : bool
If True, the cross section data will include neutron multiplication
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`ScatterXS.tally_keys` property and
values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1,
num_azimuthal=1, nu=False):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self.nu = nu
def __deepcopy__(self, memo):
clone = super().__deepcopy__(memo)
clone._nu = self.nu
return clone
@property
def nu(self):
return self._nu
@nu.setter
def nu(self, nu):
cv.check_type('nu', nu, bool)
self._nu = nu
if not nu:
self._rxn_type = 'scatter'
else:
self._rxn_type = 'nu-scatter'
self._estimator = 'analog'
self._valid_estimators = ['analog']
class ArbitraryXS(MGXS):
r"""A multi-group cross section for an arbitrary reaction type.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
    multi-group cross sections of an arbitrary reaction type for multi-group
    neutronics calculations.
At a minimum, one needs to set the :attr:`ArbitraryXS.energy_groups` and
:attr:`ArbitraryXS.domain` properties. Tallies for the flux and appropriate
reaction rates over the specified domain are generated automatically via the
:attr:`ArbitraryXS.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`ArbitraryXS.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
requested cross section is calculated as:
.. math::
\frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
\sigma_X (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}
where :math:`\sigma_X` is the requested reaction type of interest.
Parameters
----------
rxn_type : str
Reaction type (e.g., '(n,2n)', '(n,Xt)', etc.)
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., '(n,2n)', '(n,Xt)', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`ArbitraryXS.tally_keys` property and values
are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, rxn_type, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1, num_azimuthal=1):
cv.check_value("rxn_type", rxn_type, ARBITRARY_VECTOR_TYPES)
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self._rxn_type = rxn_type
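# Illustrative sketch, not part of the original source: ArbitraryXS takes the
# reaction name as its first argument, validated against
# ARBITRARY_VECTOR_TYPES.  ``fuel_cell`` and ``groups`` are assumed to be
# defined as in the earlier sketch.
#
#   >>> n2n = openmc.mgxs.ArbitraryXS('(n,2n)', domain=fuel_cell,
#   ...                               domain_type='cell', groups=groups)
#   >>> n2n.rxn_type
#   '(n,2n)'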
class ArbitraryMatrixXS(MatrixMGXS):
r"""A multi-group matrix cross section for an arbitrary reaction type.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`ArbitraryMatrixXS.energy_groups` and
:attr:`ArbitraryMatrixXS.domain` properties. Tallies for the flux and
appropriate reaction rates over the specified domain are generated
automatically via the :attr:`ArbitraryMatrixXS.tallies` property, which can
then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`ArbitraryMatrixXS.xs_tally` property.
For a spatial domain :math:`V`, incoming energy group
:math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,
    the requested cross section is calculated as:
.. math::
\begin{aligned}
       \langle \sigma_{X,g'\rightarrow g} \phi \rangle &= \int_{r \in V} dr
       \int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{E_g}^{E_{g-1}} dE
       \; \sigma_X (r, E' \rightarrow E) \psi(r, E', \Omega')\\
\langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega) \\
\sigma_{X,g'\rightarrow g} &= \frac{\langle \sigma_{X,g'\rightarrow
g} \phi \rangle}{\langle \phi \rangle}
\end{aligned}
where :math:`\sigma_X` is the requested reaction type of interest.
Parameters
----------
rxn_type : str
Reaction type (e.g., '(n,2n)', '(n,nta)', etc.). Valid names have
neutrons as a product.
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`ArbitraryMatrixXS.tally_keys`
property and values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, rxn_type, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1,
num_azimuthal=1):
cv.check_value("rxn_type", rxn_type, ARBITRARY_MATRIX_TYPES)
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self._rxn_type = rxn_type.split(" ")[0]
self._estimator = 'analog'
self._valid_estimators = ['analog']
class ScatterMatrixXS(MatrixMGXS):
r"""A scattering matrix multi-group cross section with the cosine of the
change-in-angle represented as one or more Legendre moments or a histogram.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`ScatterMatrixXS.energy_groups` and
:attr:`ScatterMatrixXS.domain` properties. Tallies for the flux and
appropriate reaction rates over the specified domain are generated
automatically via the :attr:`ScatterMatrixXS.tallies` property, which can
then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`ScatterMatrixXS.xs_tally` property.
For a spatial domain :math:`V`, incoming energy group
:math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,
the Legendre scattering moments are calculated as:
.. math::
\begin{aligned}
\langle \sigma_{s,\ell,g'\rightarrow g} \phi \rangle &= \int_{r \in V} dr
\int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; P_\ell (\Omega \cdot \Omega') \sigma_s (r, E'
\rightarrow E, \Omega' \cdot \Omega) \psi(r, E', \Omega')\\
\langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega) \\
\sigma_{s,\ell,g'\rightarrow g} &= \frac{\langle
\sigma_{s,\ell,g'\rightarrow g} \phi \rangle}{\langle \phi \rangle}
\end{aligned}
If the order is zero and a :math:`P_0` transport-correction is applied
(default), the scattering matrix elements are:
.. math::
\sigma_{s,g'\rightarrow g} = \frac{\langle \sigma_{s,0,g'\rightarrow g}
\phi \rangle - \delta_{gg'} \sum_{g''} \langle \sigma_{s,1,g''\rightarrow
g} \phi \rangle}{\langle \phi \rangle}
To incorporate the effect of neutron multiplication from (n,xn) reactions
in the above relation, the `nu` parameter can be set to `True`.
An alternative form of the scattering matrix is computed when the
`formulation` property is set to 'consistent' rather than the default
of 'simple'. This formulation computes the scattering matrix multi-group
cross section as the product of the scatter cross section and
group-to-group scattering probabilities.
Unlike the default 'simple' formulation, the 'consistent' formulation
is computed from the groupwise scattering cross section which uses a
tracklength estimator. This ensures that reaction rate balance is exactly
preserved with a :class:`TotalXS` computed using a tracklength estimator.
For a scattering probability matrix :math:`P_{s,\ell,g'\rightarrow g}` and
scattering cross section :math:`\sigma_s (r, E)` for incoming energy group
:math:`[E_{g'},E_{g'-1}]` and outgoing energy group :math:`[E_g,E_{g-1}]`,
the Legendre scattering moments are calculated as:
.. math::
\sigma_{s,\ell,g'\rightarrow g} = \sigma_s (r, E) \times
P_{s,\ell,g'\rightarrow g}
To incorporate the effect of neutron multiplication from (n,xn) reactions
in the 'consistent' scattering matrix, the `nu` parameter can be set to `True`
such that the Legendre scattering moments are calculated as:
.. math::
\sigma_{s,\ell,g'\rightarrow g} = \upsilon_{g'\rightarrow g} \times
\sigma_s (r, E) \times P_{s,\ell,g'\rightarrow g}
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : int, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : int, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
nu : bool
If True, the cross section data will include neutron multiplication;
defaults to False
Attributes
----------
formulation : 'simple' or 'consistent'
The calculation approach to use ('simple' by default). The 'simple'
formulation simply divides the group-to-group scattering rates by
the groupwise flux, each computed from analog tally estimators. The
'consistent' formulation multiplies the groupwise scattering rates
by the group-to-group scatter probability matrix, the former computed
from tracklength tallies and the latter computed from analog tallies.
The 'consistent' formulation is designed to better conserve reaction
rate balance with the total and absorption cross sections computed
using tracklength tally estimators.
correction : 'P0' or None
Apply the P0 correction to scattering matrices if set to 'P0'; this is
used only if :attr:`ScatterMatrixXS.scatter_format` is 'legendre'
    scatter_format : {'legendre', 'histogram'}
Representation of the angular scattering distribution (default is
'legendre')
legendre_order : int
The highest Legendre moment in the scattering matrix; this is used if
:attr:`ScatterMatrixXS.scatter_format` is 'legendre'. (default is 0)
histogram_bins : int
The number of equally-spaced bins for the histogram representation of
the angular scattering distribution; this is used if
:attr:`ScatterMatrixXS.scatter_format` is 'histogram'. (default is 16)
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
nu : bool
If True, the cross section data will include neutron multiplication
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : int
Number of equi-width polar angle bins for angle discretization
num_azimuthal : int
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`ScatterMatrixXS.tally_keys` property
and values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1,
num_azimuthal=1, nu=False):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self._formulation = 'simple'
self._correction = 'P0'
self._scatter_format = SCATTER_LEGENDRE
self._legendre_order = 0
self._histogram_bins = 16
self._estimator = 'analog'
self._valid_estimators = ['analog']
self.nu = nu
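    # Illustrative sketch, not part of the original source (assumes the
    # property setters defined later in this class and the hypothetical
    # ``fuel_cell``/``groups`` objects): the angular treatment and formulation
    # are typically chosen after construction, e.g.
    #
    #   >>> sm = openmc.mgxs.ScatterMatrixXS(domain=fuel_cell,
    #   ...                                  domain_type='cell', groups=groups)
    #   >>> sm.formulation = 'consistent'
    #   >>> sm.legendre_order = 3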
def __deepcopy__(self, memo):
clone = super().__deepcopy__(memo)
clone._formulation = self.formulation
clone._correction = self.correction
clone._scatter_format = self.scatter_format
clone._legendre_order = self.legendre_order
clone._histogram_bins = self.histogram_bins
clone._nu = self.nu
return clone
@property
def _dont_squeeze(self):
"""Create a tuple of axes which should not be removed during the get_xs
process
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
if self.scatter_format == SCATTER_HISTOGRAM:
return (0, 1, 3, 4, 5)
else:
return (0, 1, 3, 4)
else:
if self.scatter_format == SCATTER_HISTOGRAM:
return (1, 2, 3)
else:
return (1, 2)
@property
def formulation(self):
return self._formulation
@property
def correction(self):
return self._correction
@property
def scatter_format(self):
return self._scatter_format
@property
def legendre_order(self):
return self._legendre_order
@property
def histogram_bins(self):
return self._histogram_bins
@property
def nu(self):
return self._nu
@property
def scores(self):
if self.formulation == 'simple':
scores = ['flux', self.rxn_type]
else:
# Add scores for groupwise scattering cross section
scores = ['flux', 'scatter']
# Add scores for group-to-group scattering probability matrix
# these scores also contain the angular information, whether it be
# Legendre expansion or histogram bins
scores.append('scatter')
# Add scores for multiplicity matrix; scatter info for the
# denominator will come from the previous score
if self.nu:
scores.append('nu-scatter')
# Add scores for transport correction
if self.correction == 'P0' and self.legendre_order == 0:
scores.extend([self.rxn_type, 'flux'])
return scores
@property
def tally_keys(self):
if self.formulation == 'simple':
return super().tally_keys
else:
# Add keys for groupwise scattering cross section
tally_keys = ['flux (tracklength)', 'scatter']
# Add keys for group-to-group scattering probability matrix
tally_keys.append('scatter matrix')
# Add keys for multiplicity matrix
if self.nu:
tally_keys.extend(['nu-scatter'])
# Add keys for transport correction
if self.correction == 'P0' and self.legendre_order == 0:
tally_keys.extend(['correction', 'flux (analog)'])
return tally_keys
@property
def estimator(self):
if self.formulation == 'simple':
return self._estimator
else:
# Add estimators for groupwise scattering cross section
estimators = ['tracklength', 'tracklength']
# Add estimators for group-to-group scattering probabilities
estimators.append('analog')
# Add estimators for multiplicity matrix
if self.nu:
estimators.extend(['analog'])
# Add estimators for transport correction
if self.correction == 'P0' and self.legendre_order == 0:
estimators.extend(['analog', 'analog'])
return estimators
@property
def filters(self):
if self.formulation == 'simple':
group_edges = self.energy_groups.group_edges
energy = openmc.EnergyFilter(group_edges)
energyout = openmc.EnergyoutFilter(group_edges)
if self.scatter_format == SCATTER_LEGENDRE:
if self.correction == 'P0' and self.legendre_order == 0:
angle_filter = openmc.LegendreFilter(order=1)
else:
angle_filter = \
openmc.LegendreFilter(order=self.legendre_order)
elif self.scatter_format == SCATTER_HISTOGRAM:
bins = np.linspace(-1., 1., num=self.histogram_bins + 1,
endpoint=True)
angle_filter = openmc.MuFilter(bins)
filters = [[energy], [energy, energyout, angle_filter]]
else:
group_edges = self.energy_groups.group_edges
energy = openmc.EnergyFilter(group_edges)
energyout = openmc.EnergyoutFilter(group_edges)
# Groupwise scattering cross section
filters = [[energy], [energy]]
# Group-to-group scattering probability matrix
if self.scatter_format == SCATTER_LEGENDRE:
angle_filter = openmc.LegendreFilter(order=self.legendre_order)
elif self.scatter_format == SCATTER_HISTOGRAM:
bins = np.linspace(-1., 1., num=self.histogram_bins + 1,
endpoint=True)
angle_filter = openmc.MuFilter(bins)
filters.append([energy, energyout, angle_filter])
# Multiplicity matrix
if self.nu:
filters.extend([[energy, energyout]])
# Add filters for transport correction
if self.correction == 'P0' and self.legendre_order == 0:
filters.extend([[energyout, openmc.LegendreFilter(1)],
[energy]])
return self._add_angle_filters(filters)
@property
def rxn_rate_tally(self):
if self._rxn_rate_tally is None:
if self.formulation == 'simple':
if self.scatter_format == SCATTER_LEGENDRE:
# If using P0 correction subtract P1 scatter from the diag.
if self.correction == 'P0' and self.legendre_order == 0:
scatter_p0 = self.tallies[self.rxn_type].get_slice(
filters=[openmc.LegendreFilter],
filter_bins=[('P0',)])
scatter_p1 = self.tallies[self.rxn_type].get_slice(
filters=[openmc.LegendreFilter],
filter_bins=[('P1',)])
# Set the Legendre order of these tallies to be 0
# so they can be subtracted
legendre = openmc.LegendreFilter(order=0)
scatter_p0.filters[-1] = legendre
scatter_p1.filters[-1] = legendre
scatter_p1 = scatter_p1.summation(
filter_type=openmc.EnergyFilter,
remove_filter=True)
energy_filter = \
scatter_p0.find_filter(openmc.EnergyFilter)
# Transform scatter-p1 into an energyin/out matrix
# to match scattering matrix shape for tally arithmetic
energy_filter = copy.deepcopy(energy_filter)
scatter_p1 = \
scatter_p1.diagonalize_filter(energy_filter, 1)
self._rxn_rate_tally = scatter_p0 - scatter_p1
# Otherwise, extract scattering moment reaction rate Tally
else:
self._rxn_rate_tally = self.tallies[self.rxn_type]
elif self.scatter_format == SCATTER_HISTOGRAM:
# Extract scattering rate distribution tally
self._rxn_rate_tally = self.tallies[self.rxn_type]
self._rxn_rate_tally.sparse = self.sparse
else:
msg = 'The reaction rate tally is poorly defined' \
' for the consistent formulation'
raise NotImplementedError(msg)
return self._rxn_rate_tally
@property
def xs_tally(self):
if self._xs_tally is None:
if self.tallies is None:
msg = 'Unable to get xs_tally since tallies have ' \
'not been loaded from a statepoint'
raise ValueError(msg)
# Use super class method
if self.formulation == 'simple':
self._xs_tally = MGXS.xs_tally.fget(self)
else:
                # Compute the group-to-group scattering probability matrix
tally_key = 'scatter matrix'
# Compute normalization factor summed across outgoing energies
if self.scatter_format == SCATTER_LEGENDRE:
norm = self.tallies[tally_key].get_slice(
scores=['scatter'],
filters=[openmc.LegendreFilter],
filter_bins=[('P0',)], squeeze=True)
# Compute normalization factor summed across outgoing mu bins
elif self.scatter_format == SCATTER_HISTOGRAM:
norm = self.tallies[tally_key].get_slice(
scores=['scatter'])
norm = norm.summation(
filter_type=openmc.MuFilter, remove_filter=True)
norm = norm.summation(filter_type=openmc.EnergyoutFilter,
remove_filter=True)
# Compute groupwise scattering cross section
self._xs_tally = self.tallies['scatter'] * \
self.tallies[tally_key] / norm / \
self.tallies['flux (tracklength)']
# Override the nuclides for tally arithmetic
self._xs_tally.nuclides = self.tallies['scatter'].nuclides
# Multiply by the multiplicity matrix
if self.nu:
numer = self.tallies['nu-scatter']
# Get the denominator
if self.scatter_format == SCATTER_LEGENDRE:
denom = self.tallies[tally_key].get_slice(
scores=['scatter'],
filters=[openmc.LegendreFilter],
filter_bins=[('P0',)], squeeze=True)
# Compute normalization factor summed across mu bins
elif self.scatter_format == SCATTER_HISTOGRAM:
denom = self.tallies[tally_key].get_slice(
scores=['scatter'])
# Sum across all mu bins
denom = denom.summation(
filter_type=openmc.MuFilter, remove_filter=True)
self._xs_tally *= (numer / denom)
# If using P0 correction subtract scatter-1 from the diagonal
if self.correction == 'P0' and self.legendre_order == 0:
scatter_p1 = self.tallies['correction'].get_slice(
filters=[openmc.LegendreFilter], filter_bins=[('P1',)])
flux = self.tallies['flux (analog)']
# Set the Legendre order of the P1 tally to be P0
# so it can be subtracted
legendre = openmc.LegendreFilter(order=0)
scatter_p1.filters[-1] = legendre
# Transform scatter-p1 tally into an energyin/out matrix
# to match scattering matrix shape for tally arithmetic
energy_filter = flux.find_filter(openmc.EnergyFilter)
energy_filter = copy.deepcopy(energy_filter)
scatter_p1 = scatter_p1.diagonalize_filter(energy_filter, 1)
                    # Compute the transport correction term
correction = scatter_p1 / flux
# Override the nuclides for tally arithmetic
correction.nuclides = scatter_p1.nuclides
# Set xs_tally to be itself with only P0 data
self._xs_tally = self._xs_tally.get_slice(
filters=[openmc.LegendreFilter], filter_bins=[('P0',)])
# Tell xs_tally that it is P0
legendre_xs_tally = \
self._xs_tally.find_filter(openmc.LegendreFilter)
legendre_xs_tally.order = 0
# And subtract the P1 correction from the P0 matrix
self._xs_tally -= correction
self._compute_xs()
# Force the angle filter to be the last filter
if self.scatter_format == SCATTER_HISTOGRAM:
angle_filter = self._xs_tally.find_filter(openmc.MuFilter)
else:
angle_filter = \
self._xs_tally.find_filter(openmc.LegendreFilter)
angle_filter_index = self._xs_tally.filters.index(angle_filter)
# If the angle filter index is not last, then make it last
if angle_filter_index != len(self._xs_tally.filters) - 1:
energyout_filter = \
self._xs_tally.find_filter(openmc.EnergyoutFilter)
self._xs_tally._swap_filters(energyout_filter,
angle_filter)
return self._xs_tally
@nu.setter
def nu(self, nu):
cv.check_type('nu', nu, bool)
self._nu = nu
if self.formulation == 'simple':
if not nu:
self._rxn_type = 'scatter'
self._hdf5_key = 'scatter matrix'
else:
self._rxn_type = 'nu-scatter'
self._hdf5_key = 'nu-scatter matrix'
else:
if not nu:
self._rxn_type = 'scatter'
self._hdf5_key = 'consistent scatter matrix'
else:
self._rxn_type = 'nu-scatter'
self._hdf5_key = 'consistent nu-scatter matrix'
@formulation.setter
def formulation(self, formulation):
cv.check_value('formulation', formulation, ('simple', 'consistent'))
self._formulation = formulation
if self.formulation == 'simple':
self._valid_estimators = ['analog']
if not self.nu:
self._hdf5_key = 'scatter matrix'
else:
self._hdf5_key = 'nu-scatter matrix'
else:
self._valid_estimators = ['tracklength']
if not self.nu:
self._hdf5_key = 'consistent scatter matrix'
else:
self._hdf5_key = 'consistent nu-scatter matrix'
@correction.setter
def correction(self, correction):
cv.check_value('correction', correction, ('P0', None))
if self.scatter_format == SCATTER_LEGENDRE:
if correction == 'P0' and self.legendre_order > 0:
msg = 'The P0 correction will be ignored since the ' \
'scattering order {} is greater than '\
'zero'.format(self.legendre_order)
warnings.warn(msg)
elif self.scatter_format == SCATTER_HISTOGRAM:
msg = 'The P0 correction will be ignored since the ' \
'scatter format is set to histogram'
warnings.warn(msg)
self._correction = correction
@scatter_format.setter
def scatter_format(self, scatter_format):
cv.check_value('scatter_format', scatter_format, MU_TREATMENTS)
self._scatter_format = scatter_format
@legendre_order.setter
def legendre_order(self, legendre_order):
cv.check_type('legendre_order', legendre_order, Integral)
cv.check_greater_than('legendre_order', legendre_order, 0,
equality=True)
cv.check_less_than('legendre_order', legendre_order, _MAX_LEGENDRE,
equality=True)
if self.scatter_format == SCATTER_LEGENDRE:
if self.correction == 'P0' and legendre_order > 0:
msg = 'The P0 correction will be ignored since the ' \
'scattering order {} is greater than '\
'zero'.format(legendre_order)
warnings.warn(msg, RuntimeWarning)
self.correction = None
elif self.scatter_format == SCATTER_HISTOGRAM:
msg = 'The legendre order will be ignored since the ' \
'scatter format is set to histogram'
warnings.warn(msg)
self._legendre_order = legendre_order
@histogram_bins.setter
def histogram_bins(self, histogram_bins):
cv.check_type('histogram_bins', histogram_bins, Integral)
cv.check_greater_than('histogram_bins', histogram_bins, 0)
self._histogram_bins = histogram_bins
def load_from_statepoint(self, statepoint):
"""Extracts tallies in an OpenMC StatePoint with the data needed to
compute multi-group cross sections.
This method is needed to compute cross section data from tallies
in an OpenMC StatePoint object.
.. note:: The statepoint must be linked with an OpenMC Summary object.
Parameters
----------
statepoint : openmc.StatePoint
An OpenMC StatePoint object with tally data
Raises
------
ValueError
When this method is called with a statepoint that has not been
linked with a summary object.
"""
# Clear any tallies previously loaded from a statepoint
if self.loaded_sp:
self._tallies = None
self._xs_tally = None
self._rxn_rate_tally = None
self._loaded_sp = False
super().load_from_statepoint(statepoint)
def get_slice(self, nuclides=[], in_groups=[], out_groups=[],
legendre_order='same'):
"""Build a sliced ScatterMatrix for the specified nuclides and
energy groups.
This method constructs a new MGXS to encapsulate a subset of the data
represented by this MGXS. The subset of data to include in the tally
slice is determined by the nuclides and energy groups specified in
the input parameters.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
in_groups : list of int
A list of incoming energy group indices starting at 1 for the high
energies (e.g., [1, 2, 3]; default is [])
out_groups : list of int
A list of outgoing energy group indices starting at 1 for the high
energies (e.g., [1, 2, 3]; default is [])
legendre_order : int or 'same'
The highest Legendre moment in the sliced MGXS. If order is 'same'
then the sliced MGXS will have the same Legendre moments as the
original MGXS (default). If order is an integer less than the
original MGXS' order, then only those Legendre moments up to that
order will be included in the sliced MGXS.
Returns
-------
openmc.mgxs.MatrixMGXS
A new MatrixMGXS which encapsulates the subset of data requested
for the nuclide(s) and/or energy group(s) requested in the
parameters.
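
        Examples
        --------
        Illustrative sketch only; ``scatter`` is assumed to be a
        :class:`ScatterMatrixXS` already loaded from a statepoint with a
        ``legendre_order`` of at least 1::

            # Keep only U235, the two highest incoming energy groups, and
            # the P0/P1 scattering moments
            sliced = scatter.get_slice(nuclides=['U235'], in_groups=[1, 2],
                                       legendre_order=1)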
"""
# Call super class method and null out derived tallies
slice_xs = super().get_slice(nuclides, in_groups)
slice_xs._rxn_rate_tally = None
slice_xs._xs_tally = None
# Slice the Legendre order if needed
if legendre_order != 'same' and self.scatter_format == SCATTER_LEGENDRE:
cv.check_type('legendre_order', legendre_order, Integral)
cv.check_less_than('legendre_order', legendre_order,
self.legendre_order, equality=True)
slice_xs.legendre_order = legendre_order
# Slice the scattering tally
filter_bins = [tuple(['P{}'.format(i)
for i in range(self.legendre_order + 1)])]
slice_xs.tallies[self.rxn_type] = \
slice_xs.tallies[self.rxn_type].get_slice(
filters=[openmc.LegendreFilter], filter_bins=filter_bins)
# Slice outgoing energy groups if needed
if len(out_groups) != 0:
filter_bins = []
for group in out_groups:
group_bounds = self.energy_groups.get_group_bounds(group)
filter_bins.append(group_bounds)
filter_bins = [tuple(filter_bins)]
# Slice each of the tallies across energyout groups
for tally_type, tally in slice_xs.tallies.items():
if tally.contains_filter(openmc.EnergyoutFilter):
tally_slice = tally.get_slice(
filters=[openmc.EnergyoutFilter],
filter_bins=filter_bins)
slice_xs.tallies[tally_type] = tally_slice
slice_xs.sparse = self.sparse
return slice_xs
def get_xs(self, in_groups='all', out_groups='all',
subdomains='all', nuclides='all', moment='all',
xs_type='macro', order_groups='increasing',
row_column='inout', value='mean', squeeze=True):
r"""Returns an array of multi-group cross sections.
This method constructs a 5D NumPy array for the requested
multi-group cross section data for one or more subdomains
(1st dimension), energy groups in (2nd dimension), energy groups out
(3rd dimension), nuclides (4th dimension), and moments/histograms
(5th dimension).
.. note:: The scattering moments are not multiplied by the
:math:`(2\ell+1)/2` prefactor in the expansion of the
scattering source into Legendre moments in the neutron
transport equation.
Parameters
----------
in_groups : Iterable of Integral or 'all'
Incoming energy groups of interest. Defaults to 'all'.
out_groups : Iterable of Integral or 'all'
Outgoing energy groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
A list of nuclide name strings (e.g., ['U235', 'U238']). The
special string 'all' will return the cross sections for all nuclides
in the spatial domain. The special string 'sum' will return the
cross section summed over all nuclides. Defaults to 'all'.
moment : int or 'all'
The scattering matrix moment to return. All moments will be
returned if the moment is 'all' (default); otherwise, a specific
moment will be returned.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
row_column: {'inout', 'outin'}
Return the cross section indexed first by incoming group and
second by outgoing group ('inout'), or vice versa ('outin').
Defaults to 'inout'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the multi-group cross section indexed in the order
each group and subdomain is listed in the parameters.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
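
        Examples
        --------
        Illustrative sketch only; ``scatter`` is assumed to be an instance
        that has already been loaded from a statepoint::

            # Full scattering matrix with every Legendre moment retained
            sigma_s = scatter.get_xs()

            # Isotropic (P0) block only, indexed outgoing-then-incoming
            sigma_s0 = scatter.get_xs(moment=0, row_column='outin')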
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# FIXME: Unable to get microscopic xs for mesh domain because the mesh
# cells do not know the nuclide densities in each mesh cell.
if self.domain_type == 'mesh' and xs_type == 'micro':
msg = 'Unable to get micro xs for mesh domain since the mesh ' \
'cells do not know the nuclide densities in each mesh cell.'
raise ValueError(msg)
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral, max_depth=3)
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
subdomain_bins = []
for subdomain in subdomains:
subdomain_bins.append(subdomain)
filter_bins.append(tuple(subdomain_bins))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(in_groups, str):
cv.check_iterable_type('groups', in_groups, Integral)
filters.append(openmc.EnergyFilter)
energy_bins = []
for group in in_groups:
energy_bins.append(
(self.energy_groups.get_group_bounds(group),))
filter_bins.append(tuple(energy_bins))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(out_groups, str):
cv.check_iterable_type('groups', out_groups, Integral)
for group in out_groups:
filters.append(openmc.EnergyoutFilter)
filter_bins.append((self.energy_groups.get_group_bounds(group),))
# Construct CrossScore for requested scattering moment
if self.scatter_format == SCATTER_LEGENDRE:
if moment != 'all':
cv.check_type('moment', moment, Integral)
cv.check_greater_than('moment', moment, 0, equality=True)
cv.check_less_than(
'moment', moment, self.legendre_order, equality=True)
filters.append(openmc.LegendreFilter)
filter_bins.append(('P{}'.format(moment),))
num_angle_bins = 1
else:
num_angle_bins = self.legendre_order + 1
else:
num_angle_bins = self.histogram_bins
# Construct a collection of the nuclides to retrieve from the xs tally
if self.by_nuclide:
if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:
query_nuclides = self.get_nuclides()
else:
query_nuclides = nuclides
else:
query_nuclides = ['total']
# Use tally summation if user requested the sum for all nuclides
scores = self.xs_tally.scores
if nuclides == 'sum' or nuclides == ['sum']:
xs_tally = self.xs_tally.summation(nuclides=query_nuclides)
xs = xs_tally.get_values(scores=scores, filters=filters,
filter_bins=filter_bins, value=value)
else:
xs = self.xs_tally.get_values(scores=scores, filters=filters,
filter_bins=filter_bins,
nuclides=query_nuclides, value=value)
# Divide by atom number densities for microscopic cross sections
if xs_type == 'micro' and self._divide_by_density:
if self.by_nuclide:
densities = self.get_nuclide_densities(nuclides)
else:
densities = self.get_nuclide_densities('sum')
if value == 'mean' or value == 'std_dev':
xs /= densities[np.newaxis, :, np.newaxis]
        # Convert any NaNs to zero
xs = np.nan_to_num(xs)
if in_groups == 'all':
num_in_groups = self.num_groups
else:
num_in_groups = len(in_groups)
if out_groups == 'all':
num_out_groups = self.num_groups
else:
num_out_groups = len(out_groups)
# Reshape tally data array with separate axes for domain and energy
        # Accommodate the polar and azimuthal bins if needed
num_subdomains = int(xs.shape[0] / (num_angle_bins * num_in_groups *
num_out_groups * self.num_polar *
self.num_azimuthal))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal,
num_subdomains, num_in_groups, num_out_groups,
num_angle_bins)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Transpose the scattering matrix if requested by user
if row_column == 'outin':
xs = np.swapaxes(xs, 3, 4)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
xs = xs[:, :, :, ::-1, ::-1, ...]
else:
new_shape = (num_subdomains, num_in_groups, num_out_groups,
num_angle_bins)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Transpose the scattering matrix if requested by user
if row_column == 'outin':
xs = np.swapaxes(xs, 1, 2)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
xs = xs[:, ::-1, ::-1, ...]
if squeeze:
# We want to squeeze out everything but the angles, in_groups,
# out_groups, and, if needed, num_angle_bins dimension. These must
# not be squeezed so 1-group, 1-angle problems have the correct
# shape.
xs = self._squeeze_xs(xs)
return xs
def get_pandas_dataframe(self, groups='all', nuclides='all',
xs_type='macro', paths=False):
"""Build a Pandas DataFrame for the MGXS data.
This method leverages :meth:`openmc.Tally.get_pandas_dataframe`, but
renames the columns with terminology appropriate for cross section data.
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the dataframe. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will include the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
include the cross sections summed over all nuclides. Defaults to
'all'.
xs_type: {'macro', 'micro'}
Return macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
paths : bool, optional
            Construct columns for distribcell tally filters (default is False).
The geometric information in the Summary object is embedded into
a Multi-index column with a geometric "path" to each distribcell
instance.
Returns
-------
pandas.DataFrame
A Pandas DataFrame for the cross section data.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
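
        Examples
        --------
        Illustrative sketch only; ``scatter`` is assumed to be an instance
        that has already been loaded from a statepoint::

            df = scatter.get_pandas_dataframe(xs_type='macro')
            print(df.head())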
"""
# Build the dataframe using the parent class method
df = super().get_pandas_dataframe(groups, nuclides, xs_type,
paths=paths)
# If the matrix is P0, remove the legendre column
if self.scatter_format == SCATTER_LEGENDRE and self.legendre_order == 0:
df = df.drop(axis=1, labels=['legendre'])
return df
def print_xs(self, subdomains='all', nuclides='all',
xs_type='macro', moment=0):
"""Prints a string representation for the multi-group cross section.
Parameters
----------
subdomains : Iterable of Integral or 'all'
The subdomain IDs of the cross sections to include in the report.
Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the report. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will report the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
report the cross sections summed over all nuclides. Defaults to
'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
moment : int
The scattering moment to print (default is 0)
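
        Examples
        --------
        Illustrative sketch only; ``scatter`` is assumed to be an instance
        that has already been loaded from a statepoint::

            # Report the P0 matrix for every subdomain and nuclide
            scatter.print_xs(moment=0)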
"""
# Construct a collection of the subdomains to report
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral)
elif self.domain_type == 'distribcell':
            subdomains = np.arange(self.num_subdomains, dtype=int)
elif self.domain_type == 'mesh':
subdomains = list(self.domain.indices)
else:
subdomains = [self.domain.id]
# Construct a collection of the nuclides to report
if self.by_nuclide:
if nuclides == 'all':
nuclides = self.get_nuclides()
if nuclides == 'sum':
nuclides = ['sum']
else:
cv.check_iterable_type('nuclides', nuclides, str)
else:
nuclides = ['sum']
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
if self.correction != 'P0' and self.scatter_format == SCATTER_LEGENDRE:
rxn_type = '{0} (P{1})'.format(self.rxn_type, moment)
else:
rxn_type = self.rxn_type
# Build header for string with type and domain info
string = 'Multi-Group XS\n'
string += '{0: <16}=\t{1}\n'.format('\tReaction Type', rxn_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain Type', self.domain_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain ID', self.domain.id)
# Generate the header for an individual XS
xs_header = '\tCross Sections [{0}]:'.format(self.get_units(xs_type))
# If cross section data has not been computed, only print string header
if self.tallies is None:
print(string)
return
string += '{0: <16}\n'.format('\tEnergy Groups:')
template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]\n'
# Loop over energy groups ranges
for group in range(1, self.num_groups + 1):
bounds = self.energy_groups.get_group_bounds(group)
string += template.format('', group, bounds[0], bounds[1])
# Set polar and azimuthal bins if necessary
if self.num_polar > 1 or self.num_azimuthal > 1:
pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,
endpoint=True)
azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,
endpoint=True)
# Loop over all subdomains
for subdomain in subdomains:
if self.domain_type == 'distribcell' or self.domain_type == 'mesh':
string += '{0: <16}=\t{1}\n'.format('\tSubdomain', subdomain)
# Loop over all Nuclides
for nuclide in nuclides:
# Build header for nuclide type
                if nuclide != 'sum':
string += '{0: <16}=\t{1}\n'.format('\tNuclide', nuclide)
# Build header for cross section type
string += '{0: <16}\n'.format(xs_header)
average_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='mean',
moment=moment)
rel_err_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='rel_err',
moment=moment)
rel_err_xs = rel_err_xs * 100.
# Create a function for printing group and histogram data
def print_groups_and_histogram(avg_xs, err_xs, num_groups,
num_histogram_bins):
template = '{0: <12}Group {1} -> Group {2}:\t\t'
to_print = ""
# Loop over incoming/outgoing energy groups ranges
for in_group in range(1, num_groups + 1):
for out_group in range(1, num_groups + 1):
to_print += template.format('', in_group,
out_group)
if num_histogram_bins > 0:
for i in range(num_histogram_bins):
to_print += \
'\n{0: <16}Histogram Bin {1}:{2: <6}'.format(
'', i + 1, '')
to_print += '{0:.2e} +/- {1:.2e}%'.format(
avg_xs[in_group - 1, out_group - 1, i],
err_xs[in_group - 1, out_group - 1, i])
to_print += '\n'
else:
to_print += '{0:.2e} +/- {1:.2e}%'.format(
avg_xs[in_group - 1, out_group - 1],
err_xs[in_group - 1, out_group - 1])
to_print += '\n'
to_print += '\n'
return to_print
# Set the number of histogram bins
if self.scatter_format == SCATTER_HISTOGRAM:
num_mu_bins = self.histogram_bins
else:
num_mu_bins = 0
if self.num_polar > 1 or self.num_azimuthal > 1:
# Loop over polar, azi, and in/out energy group ranges
for pol in range(len(pol_bins) - 1):
pol_low, pol_high = pol_bins[pol: pol + 2]
for azi in range(len(azi_bins) - 1):
azi_low, azi_high = azi_bins[azi: azi + 2]
string += \
'\t\tPolar Angle: [{0:5f} - {1:5f}]'.format(
pol_low, pol_high) + \
'\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(
azi_low, azi_high) + '\n'
string += print_groups_and_histogram(
average_xs[pol, azi, ...],
rel_err_xs[pol, azi, ...], self.num_groups,
num_mu_bins)
string += '\n'
else:
string += print_groups_and_histogram(
average_xs, rel_err_xs, self.num_groups, num_mu_bins)
string += '\n'
string += '\n'
string += '\n'
print(string)
class MultiplicityMatrixXS(MatrixMGXS):
r"""The scattering multiplicity matrix.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`MultiplicityMatrixXS.energy_groups` and
:attr:`MultiplicityMatrixXS.domain` properties. Tallies for the flux and
appropriate reaction rates over the specified domain are generated
automatically via the :attr:`MultiplicityMatrixXS.tallies` property, which
can then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`MultiplicityMatrixXS.xs_tally`
property.
For a spatial domain :math:`V`, incoming energy group
:math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,
the multiplicity is calculated as:
.. math::
        \begin{aligned}
        \langle \upsilon \sigma_{s,g'\rightarrow g} \phi \rangle &= \int_{r \in
        V} dr \int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{4\pi}
        d\Omega \int_{E_g}^{E_{g-1}} dE \; \sum_i \upsilon_i \sigma_i (r, E'
        \rightarrow E, \Omega' \cdot \Omega) \psi(r, E', \Omega') \\
        \langle \sigma_{s,g'\rightarrow g} \phi \rangle &= \int_{r \in
        V} dr \int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{4\pi}
        d\Omega \int_{E_g}^{E_{g-1}} dE \; \sum_i \sigma_i (r, E'
        \rightarrow E, \Omega' \cdot \Omega) \psi(r, E', \Omega') \\
        \upsilon_{g'\rightarrow g} &= \frac{\langle \upsilon
        \sigma_{s,g'\rightarrow g} \phi \rangle}{\langle \sigma_{s,g'\rightarrow g}
        \phi \rangle}
        \end{aligned}
where :math:`\upsilon_i` is the multiplicity for the :math:`i`-th reaction.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`MultiplicityMatrixXS.tally_keys`
property and values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
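
    Examples
    --------
    A minimal sketch; the ``fuel`` material and the statepoint filename are
    illustrative assumptions::

        import openmc
        import openmc.mgxs

        # fuel: an existing openmc.Material (assumed for illustration)
        groups = openmc.mgxs.EnergyGroups([0., 0.625, 20.0e6])
        mult = openmc.mgxs.MultiplicityMatrixXS(domain=fuel,
                                                domain_type='material',
                                                groups=groups)
        tallies = openmc.Tallies()
        tallies += mult.tallies.values()

        # After the OpenMC run:
        sp = openmc.StatePoint('statepoint.100.h5')
        mult.load_from_statepoint(sp)
        upsilon = mult.get_xs()  # group-to-group scattering multiplicity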
"""
# Store whether or not the number density should be removed for microscopic
# values of this data; since a multiplicity matrix should reflect the
# multiplication relative to 1, this class will not divide by density
# for microscopic data
_divide_by_density = False
def __init__(self, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1, num_azimuthal=1):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self._rxn_type = 'multiplicity matrix'
self._estimator = 'analog'
self._valid_estimators = ['analog']
@property
def scores(self):
scores = ['nu-scatter', 'scatter']
return scores
@property
def filters(self):
# Create the non-domain specific Filters for the Tallies
group_edges = self.energy_groups.group_edges
energy = openmc.EnergyFilter(group_edges)
energyout = openmc.EnergyoutFilter(group_edges)
filters = [[energy, energyout], [energy, energyout]]
return self._add_angle_filters(filters)
@property
def rxn_rate_tally(self):
if self._rxn_rate_tally is None:
self._rxn_rate_tally = self.tallies['nu-scatter']
self._rxn_rate_tally.sparse = self.sparse
return self._rxn_rate_tally
@property
def xs_tally(self):
if self._xs_tally is None:
scatter = self.tallies['scatter']
# Compute the multiplicity
self._xs_tally = self.rxn_rate_tally / scatter
super()._compute_xs()
return self._xs_tally
class ScatterProbabilityMatrix(MatrixMGXS):
r"""The group-to-group scattering probability matrix.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`ScatterProbabilityMatrix.energy_groups`
and :attr:`ScatterProbabilityMatrix.domain` properties. Tallies for the
appropriate reaction rates over the specified domain are generated
automatically via the :attr:`ScatterProbabilityMatrix.tallies` property,
which can then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`ScatterProbabilityMatrix.xs_tally`
property.
For a spatial domain :math:`V`, incoming energy group
:math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,
the group-to-group scattering probabilities are calculated as:
.. math::
\begin{aligned}
\langle \sigma_{s,g'\rightarrow g} \phi \rangle &= \int_{r \in V} dr
\int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; \sigma_{s} (r, E' \rightarrow E, \Omega'
\cdot \Omega) \psi(r, E', \Omega')\\
        \langle \sigma_{s,g'} \phi \rangle &= \int_{r \in V} dr
\int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{4\pi} d\Omega
\int_{0}^{\infty} dE \; \sigma_s (r, E'
\rightarrow E, \Omega' \cdot \Omega) \psi(r, E', \Omega')\\
P_{s,g'\rightarrow g} &= \frac{\langle
\sigma_{s,g'\rightarrow g} \phi \rangle}{\langle
\sigma_{s,g'} \phi \rangle}
\end{aligned}
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`ScatterProbabilityMatrix.tally_keys`
property and values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
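
    Examples
    --------
    A minimal sketch; the ``fuel_cell`` cell and the statepoint filename are
    illustrative assumptions::

        import openmc
        import openmc.mgxs

        # fuel_cell: an existing openmc.Cell (assumed for illustration)
        groups = openmc.mgxs.EnergyGroups([0., 0.625, 20.0e6])
        prob = openmc.mgxs.ScatterProbabilityMatrix(domain=fuel_cell,
                                                    domain_type='cell',
                                                    groups=groups)
        tallies = openmc.Tallies()
        tallies += prob.tallies.values()

        # After the OpenMC run; each incoming-group row sums to roughly one
        sp = openmc.StatePoint('statepoint.100.h5')
        prob.load_from_statepoint(sp)
        p_matrix = prob.get_xs()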
"""
# Store whether or not the number density should be removed for microscopic
# values of this data; since this probability matrix is always normalized
# to 1.0, this density division is not necessary
_divide_by_density = False
def __init__(self, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1, num_azimuthal=1):
super().__init__(domain, domain_type, groups, by_nuclide,
name, num_polar, num_azimuthal)
self._rxn_type = 'scatter'
self._hdf5_key = 'scatter probability matrix'
self._estimator = 'analog'
self._valid_estimators = ['analog']
@property
def scores(self):
return [self.rxn_type]
@property
def filters(self):
# Create the non-domain specific Filters for the Tallies
group_edges = self.energy_groups.group_edges
energy = openmc.EnergyFilter(group_edges)
energyout = openmc.EnergyoutFilter(group_edges)
filters = [[energy, energyout]]
return self._add_angle_filters(filters)
@property
def rxn_rate_tally(self):
if self._rxn_rate_tally is None:
self._rxn_rate_tally = self.tallies[self.rxn_type]
self._rxn_rate_tally.sparse = self.sparse
return self._rxn_rate_tally
@property
def xs_tally(self):
if self._xs_tally is None:
norm = self.rxn_rate_tally.get_slice(scores=[self.rxn_type])
norm = norm.summation(
filter_type=openmc.EnergyoutFilter, remove_filter=True)
# Compute the group-to-group probabilities
self._xs_tally = self.tallies[self.rxn_type] / norm
super()._compute_xs()
return self._xs_tally
class NuFissionMatrixXS(MatrixMGXS):
r"""A fission production matrix multi-group cross section.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`NuFissionMatrixXS.energy_groups` and
:attr:`NuFissionMatrixXS.domain` properties. Tallies for the flux and
appropriate reaction rates over the specified domain are generated
automatically via the :attr:`NuFissionMatrixXS.tallies` property, which can
then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`NuFissionMatrixXS.xs_tally` property.
For a spatial domain :math:`V`, incoming energy group
:math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,
the fission production is calculated as:
.. math::
\begin{aligned}
\langle \nu\sigma_{f,g'\rightarrow g} \phi \rangle &= \int_{r \in V} dr
\int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{E_g}^{E_{g-1}} dE
\; \chi(E) \nu\sigma_f (r, E') \psi(r, E', \Omega')\\
\langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
        \int_{E_{g'}}^{E_{g'-1}} dE \; \psi (r, E, \Omega) \\
\nu\sigma_{f,g'\rightarrow g} &= \frac{\langle \nu\sigma_{f,g'\rightarrow
g} \phi \rangle}{\langle \phi \rangle}
\end{aligned}
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
prompt : bool
If true, computes cross sections which only includes prompt neutrons;
defaults to False which includes prompt and delayed in total
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
prompt : bool
If true, computes cross sections which only includes prompt neutrons
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`NuFissionMatrixXS.tally_keys`
property and values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
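
    Examples
    --------
    A minimal sketch; the ``fuel`` material and the statepoint filename are
    illustrative assumptions::

        import openmc
        import openmc.mgxs

        # fuel: an existing openmc.Material (assumed for illustration)
        groups = openmc.mgxs.EnergyGroups([0., 0.625, 20.0e6])
        nu_fission = openmc.mgxs.NuFissionMatrixXS(domain=fuel,
                                                   domain_type='material',
                                                   groups=groups,
                                                   prompt=True)
        tallies = openmc.Tallies()
        tallies += nu_fission.tallies.values()

        # After the OpenMC run:
        sp = openmc.StatePoint('statepoint.100.h5')
        nu_fission.load_from_statepoint(sp)
        prompt_nu_sigma_f = nu_fission.get_xs()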
"""
def __init__(self, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1,
num_azimuthal=1, prompt=False):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
if not prompt:
self._rxn_type = 'nu-fission'
self._hdf5_key = 'nu-fission matrix'
else:
self._rxn_type = 'prompt-nu-fission'
self._hdf5_key = 'prompt-nu-fission matrix'
self._estimator = 'analog'
self._valid_estimators = ['analog']
self.prompt = prompt
@property
def prompt(self):
return self._prompt
@prompt.setter
def prompt(self, prompt):
cv.check_type('prompt', prompt, bool)
self._prompt = prompt
def __deepcopy__(self, memo):
clone = super().__deepcopy__(memo)
clone._prompt = self.prompt
return clone
class Chi(MGXS):
r"""The fission spectrum.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`Chi.energy_groups` and
:attr:`Chi.domain` properties. Tallies for the flux and appropriate reaction
rates over the specified domain are generated automatically via the
:attr:`Chi.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`Chi.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
fission spectrum is calculated as:
.. math::
\begin{aligned}
\langle \nu\sigma_{f,g' \rightarrow g} \phi \rangle &= \int_{r \in V} dr
\int_{4\pi} d\Omega' \int_0^\infty dE' \int_{E_g}^{E_{g-1}} dE \; \chi(E)
\nu\sigma_f (r, E') \psi(r, E', \Omega')\\
\langle \nu\sigma_f \phi \rangle &= \int_{r \in V} dr \int_{4\pi}
d\Omega' \int_0^\infty dE' \int_0^\infty dE \; \chi(E) \nu\sigma_f (r,
E') \psi(r, E', \Omega') \\
\chi_g &= \frac{\langle \nu\sigma_{f,g' \rightarrow g} \phi \rangle}
{\langle \nu\sigma_f \phi \rangle}
\end{aligned}
This class can also be used to gather a prompt-chi (which only includes the
outgoing energy spectrum of prompt neutrons). This is accomplished by
setting the :attr:`Chi.prompt` attribute to `True`.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
prompt : bool
If true, computes cross sections which only includes prompt neutrons;
defaults to False which includes prompt and delayed in total
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
prompt : bool
If true, computes cross sections which only includes prompt neutrons
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`Chi.tally_keys` property and values are
instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
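
    Examples
    --------
    A minimal sketch; the ``fuel`` material and the statepoint filename are
    illustrative assumptions::

        import openmc
        import openmc.mgxs

        # fuel: an existing openmc.Material (assumed for illustration)
        groups = openmc.mgxs.EnergyGroups([0., 0.625, 20.0e6])
        chi = openmc.mgxs.Chi(domain=fuel, domain_type='material',
                              groups=groups, prompt=False)
        tallies = openmc.Tallies()
        tallies += chi.tallies.values()

        # After the OpenMC run:
        sp = openmc.StatePoint('statepoint.100.h5')
        chi.load_from_statepoint(sp)
        spectrum = chi.get_xs()  # normalized emission spectrum per group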
"""
# Store whether or not the number density should be removed for microscopic
# values of this data; since this chi data is normalized to 1.0, the
# data should not be divided by the number density
_divide_by_density = False
def __init__(self, domain=None, domain_type=None, groups=None,
prompt=False, by_nuclide=False, name='', num_polar=1,
num_azimuthal=1):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
if not prompt:
self._rxn_type = 'chi'
else:
self._rxn_type = 'chi-prompt'
self._estimator = 'analog'
self._valid_estimators = ['analog']
self.prompt = prompt
def __deepcopy__(self, memo):
clone = super().__deepcopy__(memo)
clone._prompt = self.prompt
return clone
@property
def prompt(self):
return self._prompt
@property
def _dont_squeeze(self):
"""Create a tuple of axes which should not be removed during the get_xs
process
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
return (0, 1, 3)
else:
return (1,)
@property
def scores(self):
if not self.prompt:
return ['nu-fission', 'nu-fission']
else:
return ['prompt-nu-fission', 'prompt-nu-fission']
@property
def filters(self):
# Create the non-domain specific Filters for the Tallies
group_edges = self.energy_groups.group_edges
energyout = openmc.EnergyoutFilter(group_edges)
energyin = openmc.EnergyFilter([group_edges[0], group_edges[-1]])
filters = [[energyin], [energyout]]
return self._add_angle_filters(filters)
@property
def tally_keys(self):
return ['nu-fission-in', 'nu-fission-out']
@property
def rxn_rate_tally(self):
if self._rxn_rate_tally is None:
self._rxn_rate_tally = self.tallies['nu-fission-out']
self._rxn_rate_tally.sparse = self.sparse
return self._rxn_rate_tally
@property
def xs_tally(self):
if self._xs_tally is None:
nu_fission_in = self.tallies['nu-fission-in']
# Remove coarse energy filter to keep it out of tally arithmetic
energy_filter = nu_fission_in.find_filter(openmc.EnergyFilter)
nu_fission_in.remove_filter(energy_filter)
# Compute chi
self._xs_tally = self.rxn_rate_tally / nu_fission_in
# Add the coarse energy filter back to the nu-fission tally
nu_fission_in.filters.append(energy_filter)
return self._xs_tally
@prompt.setter
def prompt(self, prompt):
cv.check_type('prompt', prompt, bool)
self._prompt = prompt
if not self.prompt:
self._rxn_type = 'nu-fission'
self._hdf5_key = 'chi'
else:
self._rxn_type = 'prompt-nu-fission'
self._hdf5_key = 'chi-prompt'
def get_homogenized_mgxs(self, other_mgxs):
"""Construct a homogenized mgxs with other MGXS objects.
Parameters
----------
other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS
The MGXS to homogenize with this one.
Returns
-------
openmc.mgxs.MGXS
A new homogenized MGXS
Raises
------
ValueError
If the other_mgxs is of a different type.
"""
return self._get_homogenized_mgxs(other_mgxs, 'nu-fission-in')
def get_slice(self, nuclides=[], groups=[]):
"""Build a sliced Chi for the specified nuclides and energy groups.
This method constructs a new MGXS to encapsulate a subset of the data
represented by this MGXS. The subset of data to include in the tally
slice is determined by the nuclides and energy groups specified in
the input parameters.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
groups : list of Integral
A list of energy group indices starting at 1 for the high energies
(e.g., [1, 2, 3]; default is [])
Returns
-------
openmc.mgxs.MGXS
A new MGXS which encapsulates the subset of data requested
for the nuclide(s) and/or energy group(s) requested in the
parameters.
"""
        # Temporarily remove the energy filter from nu-fission-in since its
        # coarse group structure will not work in super MGXS.get_slice(...)
nu_fission_in = self.tallies['nu-fission-in']
energy_filter = nu_fission_in.find_filter(openmc.EnergyFilter)
nu_fission_in.remove_filter(energy_filter)
# Call super class method and null out derived tallies
slice_xs = super().get_slice(nuclides, groups)
slice_xs._rxn_rate_tally = None
slice_xs._xs_tally = None
# Slice energy groups if needed
if len(groups) != 0:
filter_bins = []
for group in groups:
group_bounds = self.energy_groups.get_group_bounds(group)
filter_bins.append(group_bounds)
filter_bins = [tuple(filter_bins)]
# Slice nu-fission-out tally along energyout filter
nu_fission_out = slice_xs.tallies['nu-fission-out']
tally_slice = nu_fission_out.get_slice(
filters=[openmc.EnergyoutFilter], filter_bins=filter_bins)
slice_xs._tallies['nu-fission-out'] = tally_slice
# Add energy filter back to nu-fission-in tallies
self.tallies['nu-fission-in'].add_filter(energy_filter)
slice_xs._tallies['nu-fission-in'].add_filter(energy_filter)
slice_xs.sparse = self.sparse
return slice_xs
def merge(self, other):
"""Merge another Chi with this one
        If results have been loaded from a statepoint, then Chi objects are
        only mergeable along one and only one of energy groups or nuclides.
Parameters
----------
other : openmc.mgxs.MGXS
MGXS to merge with this one
Returns
-------
merged_mgxs : openmc.mgxs.MGXS
Merged MGXS
"""
if not self.can_merge(other):
raise ValueError('Unable to merge a Chi MGXS')
# Create deep copy of tally to return as merged tally
merged_mgxs = copy.deepcopy(self)
merged_mgxs._derived = True
merged_mgxs._rxn_rate_tally = None
merged_mgxs._xs_tally = None
# Merge energy groups
if self.energy_groups != other.energy_groups:
merged_groups = self.energy_groups.merge(other.energy_groups)
merged_mgxs.energy_groups = merged_groups
# Merge nuclides
if self.nuclides != other.nuclides:
# The nuclides must be mutually exclusive
for nuclide in self.nuclides:
if nuclide in other.nuclides:
msg = 'Unable to merge a Chi MGXS with shared nuclides'
raise ValueError(msg)
# Concatenate lists of nuclides for the merged MGXS
merged_mgxs.nuclides = self.nuclides + other.nuclides
# Merge tallies
for tally_key in self.tallies:
merged_tally = self.tallies[tally_key].merge(other.tallies[tally_key])
merged_mgxs.tallies[tally_key] = merged_tally
return merged_mgxs
def get_xs(self, groups='all', subdomains='all', nuclides='all',
xs_type='macro', order_groups='increasing',
value='mean', squeeze=True, **kwargs):
"""Returns an array of the fission spectrum.
This method constructs a 3D NumPy array for the requested
multi-group cross section data for one or more subdomains
(1st dimension), energy groups (2nd dimension), and nuclides
(3rd dimension).
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
A list of nuclide name strings (e.g., ['U235', 'U238']). The
special string 'all' will return the cross sections for all nuclides
in the spatial domain. The special string 'sum' will return the
cross section summed over all nuclides. Defaults to 'all'.
xs_type: {'macro', 'micro'}
This parameter is not relevant for chi but is included here to
mirror the parent MGXS.get_xs(...) class method
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the multi-group cross section indexed in the order
each group, subdomain and nuclide is listed in the parameters.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# FIXME: Unable to get microscopic xs for mesh domain because the mesh
# cells do not know the nuclide densities in each mesh cell.
if self.domain_type == 'mesh' and xs_type == 'micro':
msg = 'Unable to get micro xs for mesh domain since the mesh ' \
'cells do not know the nuclide densities in each mesh cell.'
raise ValueError(msg)
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral,
max_depth=3)
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
subdomain_bins = []
for subdomain in subdomains:
subdomain_bins.append(subdomain)
filter_bins.append(tuple(subdomain_bins))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(groups, str):
cv.check_iterable_type('groups', groups, Integral)
filters.append(openmc.EnergyoutFilter)
energy_bins = []
for group in groups:
energy_bins.append(
(self.energy_groups.get_group_bounds(group),))
filter_bins.append(tuple(energy_bins))
# If chi was computed for each nuclide in the domain
if self.by_nuclide:
# Get the sum as the fission source weighted average chi for all
# nuclides in the domain
if nuclides == 'sum' or nuclides == ['sum']:
# Retrieve the fission production tallies
nu_fission_in = self.tallies['nu-fission-in']
nu_fission_out = self.tallies['nu-fission-out']
# Sum out all nuclides
nuclides = self.get_nuclides()
nu_fission_in = nu_fission_in.summation(nuclides=nuclides)
nu_fission_out = nu_fission_out.summation(nuclides=nuclides)
# Remove coarse energy filter to keep it out of tally arithmetic
energy_filter = nu_fission_in.find_filter(openmc.EnergyFilter)
nu_fission_in.remove_filter(energy_filter)
# Compute chi and store it as the xs_tally attribute so we can
# use the generic get_xs(...) method
xs_tally = nu_fission_out / nu_fission_in
# Add the coarse energy filter back to the nu-fission tally
nu_fission_in.filters.append(energy_filter)
xs = xs_tally.get_values(filters=filters,
filter_bins=filter_bins, value=value)
# Get chi for all nuclides in the domain
elif nuclides == 'all':
nuclides = self.get_nuclides()
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins,
nuclides=nuclides, value=value)
# Get chi for user-specified nuclides in the domain
else:
cv.check_iterable_type('nuclides', nuclides, str)
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins,
nuclides=nuclides, value=value)
# If chi was computed as an average of nuclides in the domain
else:
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins, value=value)
# Eliminate the trivial score dimension
xs = np.squeeze(xs, axis=len(xs.shape) - 1)
xs = np.nan_to_num(xs)
if groups == 'all':
num_groups = self.num_groups
else:
num_groups = len(groups)
# Reshape tally data array with separate axes for domain and energy
        # Accommodate the polar and azimuthal bins if needed
num_subdomains = int(xs.shape[0] / (num_groups * self.num_polar *
self.num_azimuthal))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
num_groups) + xs.shape[1:]
else:
new_shape = (num_subdomains, num_groups) + xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
xs = xs[..., ::-1, :]
if squeeze:
# We want to squeeze out everything but the polar, azimuthal,
# and energy group data.
xs = self._squeeze_xs(xs)
return xs
def get_units(self, xs_type='macro'):
"""Returns the units of Chi.
This method returns the units of Chi, which is "%" for both macro
and micro xs types.
Parameters
----------
xs_type: {'macro', 'micro'}
Return the macro or micro cross section units.
Defaults to 'macro'.
Returns
-------
str
A string representing the units of Chi.
"""
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Chi has the same units (%) for both macro and micro
return '%'
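# ---------------------------------------------------------------------------
# Editor's sketch (not part of the upstream module): a minimal, hedged example
# of how the Chi MGXS above might be used, assuming `openmc` is importable as
# elsewhere in this file. The group edges, statepoint file name and the helper
# name `_example_chi_usage` are illustrative placeholders only.
# ---------------------------------------------------------------------------
def _example_chi_usage(cell, statepoint_path='statepoint.100.h5'):
    """Sketch: tally and recover a two-group fission spectrum for `cell`."""
    # Two-group structure with a 0.625 eV thermal cutoff (edges assumed in eV)
    groups = openmc.mgxs.EnergyGroups([0.0, 0.625, 20.0e6])
    chi = Chi(domain=cell, domain_type='cell', groups=groups)
    # Before the run, chi.tallies ('nu-fission-in'/'nu-fission-out') would be
    # appended to an openmc.Tallies instance and exported to tallies.xml.
    # After the run, load the tally data and evaluate chi per energy group:
    sp = openmc.StatePoint(statepoint_path)   # assumes summary data is linked
    chi.load_from_statepoint(sp)
    return chi.get_xs(groups='all', order_groups='increasing')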
class InverseVelocity(MGXS):
r"""An inverse velocity multi-group cross section.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group neutron inverse velocities for multi-group neutronics
calculations. The units of inverse velocity are seconds per centimeter. At a
minimum, one needs to set the :attr:`InverseVelocity.energy_groups` and
:attr:`InverseVelocity.domain` properties. Tallies for the flux and
appropriate reaction rates over the specified domain are generated
automatically via the :attr:`InverseVelocity.tallies` property, which can
then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`InverseVelocity.xs_tally` property.
For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
neutron inverse velocities are calculated by tallying the flux-weighted
inverse velocity and the flux. The inverse velocity is then the
flux-weighted inverse velocity divided by the flux:
.. math::
\frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
\frac{\psi (r, E, \Omega)}{v (r, E)}}{\int_{r \in V} dr \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`InverseVelocity.tally_keys` property
and values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file) and the number of mesh cells for
'mesh' domain types.
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U-238', 'O-16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
# Store whether or not the number density should be removed for microscopic
# values of this data; since the inverse velocity does not contain number
# density scaling, we should not remove the number density from microscopic
# values
_divide_by_density = False
def __init__(self, domain=None, domain_type=None, groups=None,
by_nuclide=False, name='', num_polar=1, num_azimuthal=1):
super().__init__(domain, domain_type, groups, by_nuclide, name,
num_polar, num_azimuthal)
self._rxn_type = 'inverse-velocity'
def get_units(self, xs_type='macro'):
"""Returns the units of InverseVelocity.
This method returns the units of an InverseVelocity based on a desired
xs_type.
Parameters
----------
xs_type: {'macro', 'micro'}
Return the macro or micro cross section units.
Defaults to 'macro'.
Returns
-------
str
A string representing the units of the InverseVelocity.
"""
if xs_type == 'macro':
return 'second/cm'
else:
raise ValueError('Unable to return the units of InverseVelocity'
' for xs_type other than "macro"')
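# ---------------------------------------------------------------------------
# Editor's sketch (not part of the upstream module): a hedged usage example
# for the InverseVelocity MGXS defined above. The material domain, group edges
# and statepoint path are assumptions made purely for illustration.
# ---------------------------------------------------------------------------
def _example_inverse_velocity(material, statepoint_path='statepoint.100.h5'):
    """Sketch: tally and retrieve group-wise inverse velocities in s/cm."""
    groups = openmc.mgxs.EnergyGroups([0.0, 0.625, 20.0e6])  # edges assumed in eV
    inv_v = InverseVelocity(domain=material, domain_type='material',
                            groups=groups)
    # The auto-generated tallies in inv_v.tallies would be exported with the
    # model; after the run the statepoint supplies the flux-weighted data.
    sp = openmc.StatePoint(statepoint_path)
    inv_v.load_from_statepoint(sp)
    assert inv_v.get_units() == 'second/cm'   # 'macro' is the only valid xs_type
    return inv_v.get_xs(order_groups='increasing')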
class MeshSurfaceMGXS(MGXS):
"""An abstract multi-group cross section for some energy group structure
on the surfaces of a mesh domain.
This class can be used for both OpenMC input generation and tally data
post-processing to compute surface- and energy-integrated multi-group cross
sections for multi-group neutronics calculations.
.. note:: Users should instantiate the subclasses of this abstract class.
.. versionadded:: 0.12.1
Parameters
----------
domain : openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
Unused in MeshSurfaceMGXS
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
Unused in MeshSurfaceMGXS
domain : Mesh
Domain for spatial homogenization
domain_type : {'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is equal to the number of mesh surfaces times
two to account for both the incoming and outgoing current from the
mesh cell surfaces.
num_nuclides : int
Unused in MeshSurfaceMGXS
nuclides : Iterable of str or 'sum'
Unused in MeshSurfaceMGXS
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, energy_groups=None,
by_nuclide=False, name=''):
super(MeshSurfaceMGXS, self).__init__(domain, domain_type, energy_groups,
by_nuclide, name)
self._estimator = ['analog']
self._valid_estimators = ['analog']
@property
def scores(self):
return [self.rxn_type]
@property
def domain(self):
return self._domain
@property
def domain_type(self):
return self._domain_type
@domain.setter
def domain(self, domain):
cv.check_type('domain', domain, openmc.RegularMesh)
self._domain = domain
# Assign a domain type
if self.domain_type is None:
self._domain_type = 'mesh'
@domain_type.setter
def domain_type(self, domain_type):
        cv.check_value('domain type', domain_type, ('mesh',))
self._domain_type = domain_type
@property
def filters(self):
group_edges = self.energy_groups.group_edges
energy_filter = openmc.EnergyFilter(group_edges)
mesh = _DOMAIN_TO_FILTER[self.domain_type](self.domain).mesh
meshsurface_filter = openmc.MeshSurfaceFilter(mesh)
filters = [[meshsurface_filter, energy_filter]]
return self._add_angle_filters(filters)
@property
def xs_tally(self):
if self._xs_tally is None:
if self.tallies is None:
msg = 'Unable to get xs_tally since tallies have ' \
'not been loaded from a statepoint'
raise ValueError(msg)
self._xs_tally = self.rxn_rate_tally
self._compute_xs()
return self._xs_tally
def load_from_statepoint(self, statepoint):
"""Extracts tallies in an OpenMC StatePoint with the data needed to
compute multi-group cross sections.
This method is needed to compute cross section data from tallies
in an OpenMC StatePoint object.
.. note:: The statepoint must first be linked with a :class:`openmc.Summary`
object.
Parameters
----------
statepoint : openmc.StatePoint
An OpenMC StatePoint object with tally data
Raises
------
ValueError
When this method is called with a statepoint that has not been
linked with a summary object.
"""
cv.check_type('statepoint', statepoint, openmc.statepoint.StatePoint)
if statepoint.summary is None:
msg = 'Unable to load data from a statepoint which has not been ' \
'linked with a summary file'
raise ValueError(msg)
        filters = []
filter_bins = []
# Clear any tallies previously loaded from a statepoint
if self.loaded_sp:
self._tallies = None
self._xs_tally = None
self._rxn_rate_tally = None
self._loaded_sp = False
# Find, slice and store Tallies from StatePoint
# The tally slicing is needed if tally merging was used
for tally_type, tally in self.tallies.items():
sp_tally = statepoint.get_tally(
tally.scores, tally.filters, tally.nuclides,
estimator=tally.estimator, exact_filters=True)
sp_tally = sp_tally.get_slice(
tally.scores, filters, filter_bins, tally.nuclides)
sp_tally.sparse = self.sparse
self.tallies[tally_type] = sp_tally
self._loaded_sp = True
def get_xs(self, groups='all', subdomains='all', nuclides='all',
xs_type='macro', order_groups='increasing',
value='mean', squeeze=True, **kwargs):
r"""Returns an array of multi-group cross sections.
This method constructs a 3D NumPy array for the requested
multi-group cross section data for one or more subdomains
(1st dimension), energy groups (2nd dimension), and nuclides
(3rd dimension).
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
            Unused in MeshSurfaceMGXS; its value will be ignored. The nuclides
dimension of the resultant array will always have a length of 1.
xs_type: {'macro'}
The 'macro'/'micro' distinction does not apply to MeshSurfaceMGXS.
            The calculation of a 'micro' xs_type is omitted in this class.
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the multi-group cross section indexed in the order
each group, subdomain and nuclide is listed in the parameters.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
cv.check_value('xs_type', xs_type, ['macro'])
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral,
max_depth=3)
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
subdomain_bins = []
for subdomain in subdomains:
subdomain_bins.append(subdomain)
filter_bins.append(tuple(subdomain_bins))
        # Construct list of energy group bounds tuples for all requested groups
        if not isinstance(groups, str):
            cv.check_iterable_type('groups', groups, Integral)
            filters.append(openmc.EnergyFilter)
            energy_bins = []
            for group in groups:
                energy_bins.append(
                    (self.energy_groups.get_group_bounds(group),))
            filter_bins.append(tuple(energy_bins))
        # Query the tally only after all filters and bins have been assembled
        xs = self.xs_tally.get_values(filters=filters,
                                      filter_bins=filter_bins, value=value)
# Eliminate the trivial score dimension
xs = np.squeeze(xs, axis=len(xs.shape) - 1)
xs = np.nan_to_num(xs)
if groups == 'all':
num_groups = self.num_groups
else:
num_groups = len(groups)
# Reshape tally data array with separate axes for domain and energy
        # Accommodate the polar and azimuthal bins if needed
num_surfaces = 4 * self.domain.n_dimension
num_subdomains = int(xs.shape[0] / (num_groups * self.num_polar *
self.num_azimuthal * num_surfaces))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
num_groups, num_surfaces)
else:
new_shape = (num_subdomains, num_groups, num_surfaces)
new_shape += xs.shape[1:]
new_xs = np.zeros(new_shape)
for cell in range(num_subdomains):
for g in range(num_groups):
for s in range(num_surfaces):
new_xs[cell,g,s] = \
xs[cell*num_surfaces*num_groups+s*num_groups+g]
xs = new_xs
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
xs = xs[..., ::-1, :, :]
if squeeze:
# We want to squeeze out everything but the polar, azimuthal,
# and energy group data.
xs = self._squeeze_xs(xs)
return xs
def get_pandas_dataframe(self, groups='all', nuclides='all',
xs_type='macro', paths=True):
"""Build a Pandas DataFrame for the MGXS data.
This method leverages :meth:`openmc.Tally.get_pandas_dataframe`, but
renames the columns with terminology appropriate for cross section data.
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
            Unused in MeshSurfaceMGXS; its value will be ignored. The nuclides
dimension of the resultant array will always have a length of 1.
xs_type: {'macro'}
'micro' unused in MeshSurfaceMGXS.
paths : bool, optional
Construct columns for distribcell tally filters (default is True).
The geometric information in the Summary object is embedded into
a Multi-index column with a geometric "path" to each distribcell
instance.
Returns
-------
pandas.DataFrame
A Pandas DataFrame for the cross section data.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
if not isinstance(groups, str):
cv.check_iterable_type('groups', groups, Integral)
cv.check_value('xs_type', xs_type, ['macro'])
df = self.xs_tally.get_pandas_dataframe(paths=paths)
# Remove the score column since it is homogeneous and redundant
df = df.drop('score', axis=1, level=0)
        # Convert azimuthal, polar, energy in and energy out bin values into
        # bin indices
columns = self._df_convert_columns_to_bins(df)
# Select out those groups the user requested
if not isinstance(groups, str):
if 'group in' in df:
df = df[df['group in'].isin(groups)]
if 'group out' in df:
df = df[df['group out'].isin(groups)]
mesh_str = 'mesh {0}'.format(self.domain.id)
col_key = (mesh_str, 'surf')
surfaces = df.pop(col_key)
df.insert(len(self.domain.dimension), col_key, surfaces)
if len(self.domain.dimension) == 1:
df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'surf')]
+ columns, inplace=True)
elif len(self.domain.dimension) == 2:
df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'y'),
(mesh_str, 'surf')] + columns, inplace=True)
elif len(self.domain.dimension) == 3:
df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'y'),
(mesh_str, 'z'), (mesh_str, 'surf')] + columns, inplace=True)
return df
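# ---------------------------------------------------------------------------
# Editor's sketch (not part of the upstream module): hedged post-processing
# for a MeshSurfaceMGXS subclass such as the Current class defined below. The
# statepoint path and helper name are illustrative assumptions only.
# ---------------------------------------------------------------------------
def _example_mesh_surface_postprocess(mesh_mgxs,
                                      statepoint_path='statepoint.100.h5'):
    """Sketch: load mesh-surface tally data and tabulate it."""
    sp = openmc.StatePoint(statepoint_path)   # must carry linked summary data
    mesh_mgxs.load_from_statepoint(sp)
    # get_xs returns an array shaped (mesh cells, energy groups, surfaces)
    # once the trivial dimensions have been squeezed out.
    xs = mesh_mgxs.get_xs(order_groups='increasing')
    # get_pandas_dataframe gives one row per (mesh cell, surface, group) bin,
    # sorted by mesh indices and surface as implemented above.
    df = mesh_mgxs.get_pandas_dataframe(xs_type='macro')
    return xs, df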
class Current(MeshSurfaceMGXS):
r"""A current multi-group cross section.
This class can be used for both OpenMC input generation and tally data
post-processing to compute surface- and energy-integrated
multi-group current cross sections for multi-group neutronics calculations. At
a minimum, one needs to set the :attr:`Current.energy_groups` and
:attr:`Current.domain` properties. Tallies for the appropriate
reaction rates over the specified domain are generated automatically via the
:attr:`Current.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`Current.xs_tally` property.
For a spatial domain :math:`S` and energy group :math:`[E_g,E_{g-1}]`, the
    surface- and energy-averaged net current is calculated as:
.. math::
\frac{\int_{r \in S} dS \int_{E_g}^{E_{g-1}} dE \;
J(r, E)}{\int_{r \in S} dS \int_{E_g}^{E_{g-1}} dE}.
.. versionadded:: 0.12.1
Parameters
----------
domain : openmc.RegularMesh
The domain for spatial homogenization
    domain_type : {'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
Unused in MeshSurfaceMGXS
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
Unused in MeshSurfaceMGXS
domain : openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`Current.tally_keys` property and values
are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is equal to the number of mesh surfaces times
two to account for both the incoming and outgoing current from the
mesh cell surfaces.
num_nuclides : int
Unused in MeshSurfaceMGXS
nuclides : Iterable of str or 'sum'
Unused in MeshSurfaceMGXS
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None,
groups=None, by_nuclide=False, name=''):
super(Current, self).__init__(domain, domain_type,
groups, by_nuclide, name)
self._rxn_type = 'current'
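# ---------------------------------------------------------------------------
# Editor's sketch (not part of the upstream module): a hedged end-to-end setup
# for the Current MGXS above. Mesh extents, group edges and the helper name
# are assumptions for illustration only.
# ---------------------------------------------------------------------------
def _example_current_setup():
    """Sketch: tally surface currents on a 10x10 mesh in two energy groups."""
    mesh = openmc.RegularMesh()
    mesh.dimension = [10, 10]
    mesh.lower_left = [-50.0, -50.0]
    mesh.upper_right = [50.0, 50.0]
    groups = openmc.mgxs.EnergyGroups([0.0, 0.625, 20.0e6])  # edges assumed in eV
    current = Current(domain=mesh, domain_type='mesh', groups=groups)
    # Each generated tally carries a MeshSurfaceFilter and an EnergyFilter;
    # appending them to a Tallies collection writes them to tallies.xml.
    tallies = openmc.Tallies()
    tallies += list(current.tallies.values())
    tallies.export_to_xml()
    return current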
| mit |
0x0all/scikit-learn | sklearn/tree/tests/test_tree.py | 4 | 43560 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.random import sample_without_replacement
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, return_indicator=True, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
"""Check classification on a toy dataset."""
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
"""Check classification on a weighted toy dataset."""
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
"""Check regression on a toy dataset."""
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
"""Check on a XOR problem"""
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
"""Check consistency on dataset iris."""
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
"""Check consistency on dataset boston house prices."""
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
"""Predict probabilities using DecisionTreeClassifier."""
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
"""Check the array representation."""
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
"""Check when y is pure."""
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
"""Check numerical stability."""
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
"""Check if variable importance before fit raises ValueError. """
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
"""Check that gini is equivalent to mse for binary output variable"""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
"""Check max_features."""
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
"""Test that it gives proper exception on deficient input."""
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(Exception, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(Exception, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
def test_min_samples_leaf():
"""Test if leaves contain more than leaf_count training examples"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
"""Check that tree estimator are pickable """
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
"""Check estimators on multi-output problems."""
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
"""Test that n_classes_ and classes_ have proper shape."""
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
"""Check class rebalancing."""
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
"""Check that it works no matter the memory layout"""
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
"""Check sample weighting."""
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
"""Check sample weighting raises errors."""
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def test_max_leaf_nodes():
"""Test greedy trees with max_depth + 1 leafs. """
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
"""Test preceedence of max_leaf_nodes over max_depth. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
"""Ensure property arrays' memory stays alive when tree disappears
non-regression for #2726
"""
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
"""Test if the warning for too large inputs is appropriate."""
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Reduce testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
# Set n_samples equal to n_features to ease the simultaneous
# construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
| bsd-3-clause |
cowlicks/blaze | blaze/compute/tests/test_sparksql.py | 3 | 11734 | from __future__ import absolute_import, print_function, division
import sys
import pytest
pytestmark = pytest.mark.skipif(
sys.version_info.major == 3,
reason="PyHive doesn't work with Python 3.x"
)
pyspark = pytest.importorskip('pyspark')
py4j = pytest.importorskip('py4j')
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('pyhive.sqlalchemy_hive')
import os
import itertools
import shutil
from functools import partial
from py4j.protocol import Py4JJavaError
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from blaze import compute, symbol, into, by, sin, exp, cos, tan, join
from pyspark.sql import DataFrame as SparkDataFrame
try:
from pyspark.sql.utils import AnalysisException
except ImportError:
AnalysisException = Py4JJavaError
from pyspark import HiveContext
from pyspark.sql import Row
from odo import odo, discover
from odo.utils import tmpfile
data = [['Alice', 100.0, 1],
['Bob', 200.0, 2],
['Alice', 50.0, 3]]
date_data = []
np.random.seed(0)
for attr in ('YearBegin', 'MonthBegin', 'Day', 'Hour', 'Minute', 'Second'):
rng = pd.date_range(start='now', periods=len(data),
freq=getattr(pd.datetools, attr)()).values
date_data += list(zip(np.random.choice(['Alice', 'Bob', 'Joe', 'Lester'],
size=len(data)),
np.random.rand(len(data)) * 100,
np.random.randint(100, size=3),
rng))
cities_data = [['Alice', 'NYC'],
['Bob', 'Boston']]
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
date_df = pd.DataFrame(date_data, columns=['name', 'amount', 'id', 'ds'])
cities_df = pd.DataFrame(cities_data, columns=['name', 'city'])
# sc is from conftest.py
@pytest.yield_fixture(scope='module')
def sql(sc):
try:
yield HiveContext(sc)
finally:
dbpath = 'metastore_db'
logpath = 'derby.log'
if os.path.exists(dbpath):
assert os.path.isdir(dbpath), '%s is not a directory' % dbpath
shutil.rmtree(dbpath)
if os.path.exists(logpath):
assert os.path.isfile(logpath), '%s is not a file' % logpath
os.remove(logpath)
@pytest.yield_fixture(scope='module')
def people(sc):
with tmpfile('.txt') as fn:
df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(
lambda person: Row(
name=person[0], amount=float(person[1]), id=int(person[2])
)
)
@pytest.yield_fixture(scope='module')
def cities(sc):
with tmpfile('.txt') as fn:
cities_df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(lambda person: Row(name=person[0], city=person[1]))
@pytest.yield_fixture(scope='module')
def date_people(sc):
with tmpfile('.txt') as fn:
date_df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(
lambda person: Row(
name=person[0],
amount=float(person[1]),
id=int(person[2]),
ds=pd.Timestamp(person[3]).to_pydatetime()
)
)
@pytest.fixture(scope='module')
def ctx(sql, people, cities, date_people):
sql.registerDataFrameAsTable(sql.createDataFrame(people), 't')
sql.cacheTable('t')
sql.registerDataFrameAsTable(sql.createDataFrame(cities), 's')
sql.cacheTable('s')
sql.registerDataFrameAsTable(sql.createDataFrame(date_people), 'dates')
sql.cacheTable('dates')
return sql
@pytest.fixture(scope='module')
def db(ctx):
return symbol('db', discover(ctx))
def test_projection(db, ctx):
expr = db.t[['id', 'name']]
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result) == into(set, expected)
def test_symbol_compute(db, ctx):
assert isinstance(compute(db.t, ctx), SparkDataFrame)
def test_field_access(db, ctx):
for field in db.t.fields:
expr = getattr(db.t, field)
result = into(pd.Series, compute(expr, ctx))
expected = compute(expr, {db: {'t': df}})
assert result.name == expected.name
np.testing.assert_array_equal(result.values,
expected.values)
def test_head(db, ctx):
expr = db.t[['name', 'amount']].head(2)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(list, result) == into(list, expected)
def test_literals(db, ctx):
expr = db.t[db.t.amount >= 100]
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(
map(set, into(list, expected))
)
def test_by_summary(db, ctx):
t = db.t
expr = by(t.name, mymin=t.amount.min(), mymax=t.amount.max())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result) == into(set, expected)
def test_join(db, ctx):
expr = join(db.t, db.s)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df, 's': cities_df}})
assert isinstance(result, SparkDataFrame)
assert into(set, result) == into(set, expected)
assert discover(result) == expr.dshape
def test_join_diff_contexts(db, ctx, cities):
expr = join(db.t, db.s, 'name')
people = ctx.table('t')
cities = into(ctx, cities, dshape=discover(ctx.table('s')))
scope = {db: {'t': people, 's': cities}}
result = compute(expr, scope)
expected = compute(expr, {db: {'t': df, 's': cities_df}})
assert set(map(frozenset, odo(result, set))) == set(
map(frozenset, odo(expected, set))
)
def test_field_distinct(ctx, db):
expr = db.t.name.distinct()
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
def test_boolean(ctx, db):
expr = db.t.amount > 50
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
def test_selection(ctx, db):
expr = db.t[db.t.amount > 50]
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(
map(set, into(list, expected))
)
def test_selection_field(ctx, db):
expr = db.t[db.t.amount > 50].name
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
@pytest.mark.parametrize(
['field', 'reduction'],
itertools.product(
['id', 'amount'],
['sum', 'max', 'min', 'mean', 'count', 'nunique']
)
)
def test_reductions(ctx, db, field, reduction):
expr = getattr(db.t[field], reduction)()
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(list, result)[0][0] == expected
def test_column_arithmetic(ctx, db):
expr = db.t.amount + 1
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
@pytest.mark.parametrize('func', [sin, cos, tan, exp])
def test_math(ctx, db, func):
expr = func(db.t.amount)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
np.testing.assert_allclose(
np.sort(odo(result, np.ndarray, dshape=expr.dshape)),
np.sort(odo(expected, np.ndarray))
)
@pytest.mark.parametrize(['field', 'ascending'],
itertools.product(['name', 'id', ['name', 'amount']],
[True, False]))
def test_sort(ctx, db, field, ascending):
expr = db.t.sort(field, ascending=ascending)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(
map(set, into(list, expected))
)
@pytest.mark.xfail
def test_map(ctx, db):
expr = db.t.id.map(lambda x: x + 1, 'int')
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
@pytest.mark.parametrize(
['grouper', 'reducer', 'reduction'],
itertools.chain(
itertools.product(
['name', 'id', ['id', 'amount']],
['id', 'amount'],
['sum', 'count', 'max', 'min', 'mean', 'nunique']
),
[('name', 'name', 'count'), ('name', 'name', 'nunique')]
)
)
def test_by(ctx, db, grouper, reducer, reduction):
t = db.t
expr = by(t[grouper], total=getattr(t[reducer], reduction)())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert set(map(frozenset, into(list, result))) == set(
map(frozenset, into(list, expected))
)
@pytest.mark.parametrize(
['reducer', 'reduction'],
itertools.product(['id', 'name'], ['count', 'nunique'])
)
def test_multikey_by(ctx, db, reducer, reduction):
t = db.t
expr = by(t[['id', 'amount']], total=getattr(t[reducer], reduction)())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert (set(map(frozenset, into(list, result))) ==
set(map(frozenset, into(list, expected))))
def test_grouper_with_arith(ctx, db):
expr = by(db.t[['id', 'amount']], total=(db.t.amount + 1).sum())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list, expected)))
def test_by_non_native_ops(ctx, db):
expr = by(db.t.id, total=db.t.id.nunique())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list, expected)))
def test_strlen(ctx, db):
expr = db.t.name.strlen()
result = odo(compute(expr, ctx), pd.Series)
expected = compute(expr, {db: {'t': df}})
assert result.name == 'name'
assert expected.name == 'name'
assert odo(result, set) == odo(expected, set)
@pytest.mark.parametrize(
'attr',
['year', 'month', 'day', 'hour', 'minute', 'second'] + list(
map(
partial(
pytest.mark.xfail,
raises=(Py4JJavaError, AnalysisException)
),
['millisecond', 'microsecond']
)
)
)
def test_by_with_date(ctx, db, attr):
# TODO: investigate CSV writing precision between pandas 0.16.0 and 0.16.1
# TODO: see if we can use odo to convert the dshape of an existing
# DataFrame
expr = by(getattr(db.dates.ds, attr), mean=db.dates.amount.mean())
result = odo(
compute(expr, ctx), pd.DataFrame
).sort('mean').reset_index(drop=True)
expected = compute(
expr,
{db: {'dates': date_df}}
).sort('mean').reset_index(drop=True)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize('keys', [[1], [1, 2]])
def test_isin(ctx, db, keys):
expr = db.t[db.t.id.isin(keys)]
result = odo(compute(expr, ctx), set)
expected = odo(compute(expr, {db: {'t': df}}), set)
assert (set(map(frozenset, odo(result, list))) ==
set(map(frozenset, odo(expected, list))))
def test_nunique_spark_dataframe(ctx, db):
result = odo(compute(db.t.nunique(), ctx), int)
expected = ctx.table('t').distinct().count()
assert result == expected
| bsd-3-clause |
jinzishuai/learn2deeplearn | deeplearning.ai/C1.NN_DL/week4/Building your Deep Neural Network - Step by Step/week4ex1.py | 1 | 39163 | #!/usr/bin/python3
# coding: utf-8
# # Building your Deep Neural Network: Step by Step
#
# Welcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want!
#
# - In this notebook, you will implement all the functions required to build a deep neural network.
# - In the next assignment, you will use these functions to build a deep neural network for image classification.
#
# **After this assignment you will be able to:**
# - Use non-linear units like ReLU to improve your model
# - Build a deeper neural network (with more than 1 hidden layer)
# - Implement an easy-to-use neural network class
#
# **Notation**:
# - Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer.
# - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.
# - Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example.
# - Example: $x^{(i)}$ is the $i^{th}$ training example.
# - Subscript $i$ denotes the $i^{th}$ entry of a vector.
# - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations.
#
# Let's get started!
# ## 1 - Packages
#
# Let's first import all the packages that you will need during this assignment.
# - [numpy](www.numpy.org) is the main package for scientific computing with Python.
# - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
# - dnn_utils provides some necessary functions for this notebook.
# - testCases provides some test cases to assess the correctness of your functions
# - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed.
# In[106]:
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases_v3 import *
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
#get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
#get_ipython().magic('load_ext autoreload')
#get_ipython().magic('autoreload 2')
np.random.seed(1)
# ## 2 - Outline of the Assignment
#
# To build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will:
#
# - Initialize the parameters for a two-layer network and for an $L$-layer neural network.
# - Implement the forward propagation module (shown in purple in the figure below).
# - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$).
# - We give you the ACTIVATION function (relu/sigmoid).
# - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function.
# - Stack the [LINEAR->RELU] forward function L-1 times (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.
# - Compute the loss.
# - Implement the backward propagation module (denoted in red in the figure below).
# - Complete the LINEAR part of a layer's backward propagation step.
# - We give you the gradient of the ACTIVATION function (relu_backward/sigmoid_backward)
# - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function.
# - Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function
# - Finally update the parameters.
#
# <img src="images/final outline.png" style="width:800px;height:500px;">
# <caption><center> **Figure 1**</center></caption><br>
#
#
# **Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps.
# ## 3 - Initialization
#
# You will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers.
#
# ### 3.1 - 2-layer Neural Network
#
# **Exercise**: Create and initialize the parameters of the 2-layer neural network.
#
# **Instructions**:
# - The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*.
# - Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape.
# - Use zero initialization for the biases. Use `np.zeros(shape)`.
# In[107]:
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
parameters -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(1)
### START CODE HERE ### (≈ 4 lines of code)
W1 = np.random.randn(n_h, n_x)*0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h)*0.01
b2 = np.zeros((n_y, 1))
### END CODE HERE ###
assert(W1.shape == (n_h, n_x))
assert(b1.shape == (n_h, 1))
assert(W2.shape == (n_y, n_h))
assert(b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
# In[108]:
parameters = initialize_parameters(3,2,1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected output**:
#
# <table style="width:80%">
# <tr>
# <td> **W1** </td>
# <td> [[ 0.01624345 -0.00611756 -0.00528172]
# [-0.01072969 0.00865408 -0.02301539]] </td>
# </tr>
#
# <tr>
# <td> **b1**</td>
# <td>[[ 0.]
# [ 0.]]</td>
# </tr>
#
# <tr>
# <td>**W2**</td>
# <td> [[ 0.01744812 -0.00761207]]</td>
# </tr>
#
# <tr>
# <td> **b2** </td>
# <td> [[ 0.]] </td>
# </tr>
#
# </table>
# ### 3.2 - L-layer Neural Network
#
# The initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then:
#
# <table style="width:100%">
#
#
# <tr>
# <td> </td>
# <td> **Shape of W** </td>
# <td> **Shape of b** </td>
# <td> **Activation** </td>
# <td> **Shape of Activation** </td>
# <tr>
#
# <tr>
# <td> **Layer 1** </td>
# <td> $(n^{[1]},12288)$ </td>
# <td> $(n^{[1]},1)$ </td>
# <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td>
#
# <td> $(n^{[1]},209)$ </td>
# <tr>
#
# <tr>
# <td> **Layer 2** </td>
# <td> $(n^{[2]}, n^{[1]})$ </td>
# <td> $(n^{[2]},1)$ </td>
# <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td>
# <td> $(n^{[2]}, 209)$ </td>
# <tr>
#
# <tr>
# <td> $\vdots$ </td>
# <td> $\vdots$ </td>
# <td> $\vdots$ </td>
# <td> $\vdots$</td>
# <td> $\vdots$ </td>
# <tr>
#
# <tr>
# <td> **Layer L-1** </td>
# <td> $(n^{[L-1]}, n^{[L-2]})$ </td>
# <td> $(n^{[L-1]}, 1)$ </td>
# <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td>
# <td> $(n^{[L-1]}, 209)$ </td>
# <tr>
#
#
# <tr>
# <td> **Layer L** </td>
# <td> $(n^{[L]}, n^{[L-1]})$ </td>
# <td> $(n^{[L]}, 1)$ </td>
# <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td>
# <td> $(n^{[L]}, 209)$ </td>
# <tr>
#
# </table>
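#
# As a quick sanity check (purely illustrative, and not part of the graded code), you could print
# the parameter shapes implied by an example `layer_dims` list and compare them with the table above:
# ```python
# layer_dims = [12288, 20, 7, 5, 1]   # made-up architecture, used only for this illustration
# for l in range(1, len(layer_dims)):
#     # W^[l] has shape (n^[l], n^[l-1]) and b^[l] has shape (n^[l], 1)
#     print("W" + str(l), (layer_dims[l], layer_dims[l - 1]),
#           "b" + str(l), (layer_dims[l], 1))
# ```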
#
# Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if:
#
# $$ W = \begin{bmatrix}
# j & k & l\\
# m & n & o \\
# p & q & r
# \end{bmatrix}\;\;\; X = \begin{bmatrix}
# a & b & c\\
# d & e & f \\
# g & h & i
# \end{bmatrix} \;\;\; b =\begin{bmatrix}
# s \\
# t \\
# u
# \end{bmatrix}\tag{2}$$
#
# Then $WX + b$ will be:
#
# $$ WX + b = \begin{bmatrix}
# (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\
# (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\
# (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u
# \end{bmatrix}\tag{3} $$
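#
# A minimal numpy sketch of this broadcasting behaviour (the shapes here are arbitrary and chosen
# only for illustration):
# ```python
# import numpy as np
# W = np.random.randn(3, 3)   # (n^[l], n^[l-1])
# X = np.random.randn(3, 5)   # (n^[l-1], m), with m = 5 examples
# b = np.random.randn(3, 1)   # (n^[l], 1)
# Z = np.dot(W, X) + b        # b is broadcast across all m columns of WX
# assert Z.shape == (3, 5)
# ```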
# **Exercise**: Implement initialization for an L-layer Neural Network.
#
# **Instructions**:
# - The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.
# - Use random initialization for the weight matrices. Use `np.random.randn(shape) * 0.01`.
# - Use zeros initialization for the biases. Use `np.zeros(shape)`.
# - We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. This means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers!
# - Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network).
# ```python
# if L == 1:
# parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01
# parameters["b" + str(L)] = np.zeros((layer_dims[1], 1))
# ```
# In[109]:
# GRADED FUNCTION: initialize_parameters_deep
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
### END CODE HERE ###
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
# In[110]:
parameters = initialize_parameters_deep([5,4,3])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected output**:
#
# <table style="width:80%">
# <tr>
# <td> **W1** </td>
# <td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]
# [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]
# [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]
# [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td>
# </tr>
#
# <tr>
# <td>**b1** </td>
# <td>[[ 0.]
# [ 0.]
# [ 0.]
# [ 0.]]</td>
# </tr>
#
# <tr>
# <td>**W2** </td>
# <td>[[-0.01185047 -0.0020565 0.01486148 0.00236716]
# [-0.01023785 -0.00712993 0.00625245 -0.00160513]
# [-0.00768836 -0.00230031 0.00745056 0.01976111]]</td>
# </tr>
#
# <tr>
# <td>**b2** </td>
# <td>[[ 0.]
# [ 0.]
# [ 0.]]</td>
# </tr>
#
# </table>
# ## 4 - Forward propagation module
#
# ### 4.1 - Linear Forward
# Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order:
#
# - LINEAR
# - LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid.
# - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model)
#
# The linear forward module (vectorized over all the examples) computes the following equations:
#
# $$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$
#
# where $A^{[0]} = X$.
#
# **Exercise**: Build the linear part of forward propagation.
#
# **Reminder**:
# The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help.
# In[111]:
# GRADED FUNCTION: linear_forward
def linear_forward(A, W, b):
"""
Implement the linear part of a layer's forward propagation.
Arguments:
A -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
Returns:
Z -- the input of the activation function, also called pre-activation parameter
cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently
"""
### START CODE HERE ### (≈ 1 line of code)
Z = np.dot(W, A) + b
### END CODE HERE ###
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
# In[112]:
A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
# **Expected output**:
#
# <table style="width:35%">
#
# <tr>
# <td> **Z** </td>
# <td> [[ 3.26295337 -1.23429987]] </td>
# </tr>
#
# </table>
# ### 4.2 - Linear-Activation Forward
#
# In this notebook, you will use two activation functions:
#
# - **Sigmoid**: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value "`a`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:
# ``` python
# A, activation_cache = sigmoid(Z)
# ```
#
# - **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value "`A`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:
# ``` python
# A, activation_cache = relu(Z)
# ```
# For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.
#
# **Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. The mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} + b^{[l]})$, where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function.
# In[113]:
# GRADED FUNCTION: linear_activation_forward
def linear_activation_forward(A_prev, W, b, activation):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
### END CODE HERE ###
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
### END CODE HERE ###
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
# In[114]:
A_prev, W, b = linear_activation_forward_test_case()
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid")
print("With sigmoid: A = " + str(A))
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu")
print("With ReLU: A = " + str(A))
# **Expected output**:
#
# <table style="width:35%">
# <tr>
# <td> **With sigmoid: A ** </td>
# <td > [[ 0.96890023 0.11013289]]</td>
# </tr>
# <tr>
# <td> **With ReLU: A ** </td>
# <td > [[ 3.43896131 0. ]]</td>
# </tr>
# </table>
#
# **Note**: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers.
# ### 4.3 - L-Layer Model
#
# For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID.
#
# <img src="images/model_architecture_kiank.png" style="width:600px;height:300px;">
# <caption><center> **Figure 2** : *[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model</center></caption><br>
#
# **Exercise**: Implement the forward propagation of the above model.
#
# **Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\hat{Y}$.)
#
# **Tips**:
# - Use the functions you had previously written
# - Use a for loop to replicate [LINEAR->RELU] (L-1) times
# - Don't forget to keep track of the caches in the "caches" list. To add a new value `c` to a `list`, you can use `list.append(c)`.
# In[115]:
# GRADED FUNCTION: L_model_forward
def L_model_forward(X, parameters):
"""
Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (there are L-1 of them, indexed from 0 to L-2)
the cache of linear_activation_forward() with "sigmoid" (there is one, indexed L-1)
"""
caches = []
A = X
L = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
### START CODE HERE ### (≈ 2 lines of code)
A, cache = linear_activation_forward(A_prev, parameters["W"+str(l)], parameters["b"+str(l)], activation = "relu")
caches.append(cache)
### END CODE HERE ###
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
### START CODE HERE ### (≈ 2 lines of code)
AL, cache = linear_activation_forward(A, parameters["W"+str(L)], parameters["b"+str(L)], activation = "sigmoid")
caches.append(cache)
### END CODE HERE ###
assert(AL.shape == (1,X.shape[1]))
return AL, caches
# In[116]:
X, parameters = L_model_forward_test_case_2hidden()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
# <table style="width:50%">
# <tr>
# <td> **AL** </td>
# <td > [[ 0.03921668 0.70498921 0.19734387 0.04728177]]</td>
# </tr>
# <tr>
# <td> **Length of caches list ** </td>
# <td > 3 </td>
# </tr>
# </table>
# Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions.
# ## 5 - Cost function
#
# Now you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning.
#
# **Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right)) \tag{7}$$
#
# In[117]:
# GRADED FUNCTION: compute_cost
def compute_cost(AL, Y):
"""
Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
cost -- cross-entropy cost
"""
m = Y.shape[1]
# Compute loss from aL and y.
### START CODE HERE ### (≈ 1 lines of code)
cost = -1/m*np.sum(np.multiply(np.log(AL),Y)+np.multiply(np.log(1-AL),1-Y))
### END CODE HERE ###
cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
assert(cost.shape == ())
return cost
# In[118]:
Y, AL = compute_cost_test_case()
print("cost = " + str(compute_cost(AL, Y)))
# **Expected Output**:
#
# <table>
#
# <tr>
# <td>**cost** </td>
# <td> 0.41493159961539694</td>
# </tr>
# </table>
# ## 6 - Backward propagation module
#
# Just like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters.
#
# **Reminder**:
# <img src="images/backprop_kiank.png" style="width:650px;height:250px;">
# <caption><center> **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* <br> *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* </center></caption>
#
# <!--
# For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:
#
# $$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$
#
# In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.
#
# Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$.
#
# This is why we talk about **backpropagation**.
# !-->
#
# Now, similar to forward propagation, you are going to build the backward propagation in three steps:
# - LINEAR backward
# - LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation
# - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model)
# ### 6.1 - Linear backward
#
# For layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).
#
# Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$.
#
# <img src="images/linearback_kiank.png" style="width:250px;height:300px;">
# <caption><center> **Figure 4** </center></caption>
#
# The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$ are computed using the input $dZ^{[l]}$. Here are the formulas you need:
# $$ dW^{[l]} = \frac{\partial \mathcal{L} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$
# $$ db^{[l]} = \frac{\partial \mathcal{L} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{[l](i)}\tag{9}$$
# $$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$
#
# **Exercise**: Use the 3 formulas above to implement linear_backward().
# In[119]:
# GRADED FUNCTION: linear_backward
def linear_backward(dZ, cache):
"""
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
### START CODE HERE ### (≈ 3 lines of code)
dW = 1/m*np.dot(dZ, A_prev.T)
db = 1/m*np.sum(dZ, axis=1, keepdims=True)
dA_prev = np.dot(W.T, dZ)
### END CODE HERE ###
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
# In[120]:
# Set up some test inputs
dZ, linear_cache = linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
# **Expected Output**:
#
# <table style="width:90%">
# <tr>
# <td> **dA_prev** </td>
# <td > [[ 0.51822968 -0.19517421]
# [-0.40506361 0.15255393]
# [ 2.37496825 -0.89445391]] </td>
# </tr>
#
# <tr>
# <td> **dW** </td>
# <td > [[-0.10076895 1.40685096 1.64992505]] </td>
# </tr>
#
# <tr>
# <td> **db** </td>
# <td> [[ 0.50629448]] </td>
# </tr>
#
# </table>
#
#
# ### 6.2 - Linear-Activation backward
#
# Next, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**.
#
# To help you implement `linear_activation_backward`, we provided two backward functions:
# - **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:
#
# ```python
# dZ = sigmoid_backward(dA, activation_cache)
# ```
#
# - **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:
#
# ```python
# dZ = relu_backward(dA, activation_cache)
# ```
#
# If $g(.)$ is the activation function,
# `sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$.
#
# **Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer.
# In[121]:
# GRADED FUNCTION: linear_activation_backward
def linear_activation_backward(dA, cache, activation):
"""
Implement the backward propagation for the LINEAR->ACTIVATION layer.
Arguments:
dA -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
linear_cache, activation_cache = cache
if activation == "relu":
### START CODE HERE ### (≈ 2 lines of code)
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward (dZ, linear_cache)
### END CODE HERE ###
elif activation == "sigmoid":
### START CODE HERE ### (≈ 2 lines of code)
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward (dZ, linear_cache)
### END CODE HERE ###
return dA_prev, dW, db
# In[122]:
AL, linear_activation_cache = linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
# **Expected output with sigmoid:**
#
# <table style="width:100%">
# <tr>
# <td > dA_prev </td>
# <td >[[ 0.11017994 0.01105339]
# [ 0.09466817 0.00949723]
# [-0.05743092 -0.00576154]] </td>
#
# </tr>
#
# <tr>
# <td > dW </td>
# <td > [[ 0.10266786 0.09778551 -0.01968084]] </td>
# </tr>
#
# <tr>
# <td > db </td>
# <td > [[-0.05729622]] </td>
# </tr>
# </table>
#
#
# **Expected output with relu:**
#
# <table style="width:100%">
# <tr>
# <td > dA_prev </td>
# <td > [[ 0.44090989 0. ]
# [ 0.37883606 0. ]
# [-0.2298228 0. ]] </td>
#
# </tr>
#
# <tr>
# <td > dW </td>
# <td > [[ 0.44513824 0.37371418 -0.10478989]] </td>
# </tr>
#
# <tr>
# <td > db </td>
# <td > [[-0.20837892]] </td>
# </tr>
# </table>
#
#
# ### 6.3 - L-Model Backward
#
# Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass.
#
#
# <img src="images/mn_backward.png" style="width:450px;height:300px;">
# <caption><center> **Figure 5** : Backward pass </center></caption>
#
# ** Initializing backpropagation**:
# To backpropagate through this network, we know that the output is,
# $A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$.
# To do so, use this formula (derived using calculus which you don't need in-depth knowledge of):
# ```python
# dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL
# ```
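#
# (For the curious, this line follows directly from the cross-entropy cost of equation (7): for a
# single example, $\mathcal{L}(a, y) = -\big(y\log(a) + (1-y)\log(1-a)\big)$ with $a = a^{[L]}$, so
# $$\frac{\partial \mathcal{L}}{\partial a} = -\left(\frac{y}{a} - \frac{1-y}{1-a}\right),$$
# which is exactly what the expression above computes element-wise over all examples.)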
#
# You can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula :
#
# $$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$
#
# For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`.
#
# **Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model.
# In[123]:
# GRADED FUNCTION: L_model_backward
def L_model_backward(AL, Y, caches):
"""
Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
Arguments:
AL -- probability vector, output of the forward propagation (L_model_forward())
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
### START CODE HERE ### (1 line of code)
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL
### END CODE HERE ###
# Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]"
### START CODE HERE ### (approx. 2 lines)
current_cache = caches[L-1]
grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid")
### END CODE HERE ###
for l in reversed(range(L-1)):
# lth layer: (RELU -> LINEAR) gradients.
# Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
### START CODE HERE ### (approx. 5 lines)
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward( grads["dA" + str(l+2)], current_cache, activation = "relu")
grads["dA" + str(l + 1)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
### END CODE HERE ###
return grads
# In[124]:
AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print_grads(grads)
# **Expected Output**
#
# <table style="width:60%">
#
# <tr>
# <td > dW1 </td>
# <td > [[ 0.41010002 0.07807203 0.13798444 0.10502167]
# [ 0. 0. 0. 0. ]
# [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] </td>
# </tr>
#
# <tr>
# <td > db1 </td>
# <td > [[-0.22007063]
# [ 0. ]
# [-0.02835349]] </td>
# </tr>
#
# <tr>
# <td > dA1 </td>
# <td > [[ 0.12913162 -0.44014127]
# [-0.14175655 0.48317296]
# [ 0.01663708 -0.05670698]] </td>
#
# </tr>
# </table>
#
#
# ### 6.4 - Update Parameters
#
# In this section you will update the parameters of the model, using gradient descent:
#
# $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$
# $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$
#
# where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary.
# **Exercise**: Implement `update_parameters()` to update your parameters using gradient descent.
#
# **Instructions**:
# Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$.
#
# In[125]:
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of L_model_backward
learning_rate -- the learning rate, a scalar used in the gradient descent update
Returns:
parameters -- python dictionary containing your updated parameters
parameters["W" + str(l)] = ...
parameters["b" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
### START CODE HERE ### (≈ 3 lines of code)
for l in range(1, L+1):
parameters['W' + str(l)] = parameters['W' + str(l)] - learning_rate*grads["dW"+str(l)]
parameters['b' + str(l)] = parameters['b' + str(l)] - learning_rate*grads["db"+str(l)]
### END CODE HERE ###
return parameters
# In[126]:
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
# **Expected Output**:
#
# <table style="width:100%">
# <tr>
# <td > W1 </td>
# <td > [[-0.59562069 -0.09991781 -2.14584584 1.82662008]
# [-1.76569676 -0.80627147 0.51115557 -1.18258802]
# [-1.0535704 -0.86128581 0.68284052 2.20374577]] </td>
# </tr>
#
# <tr>
# <td > b1 </td>
# <td > [[-0.04659241]
# [-1.28888275]
# [ 0.53405496]] </td>
# </tr>
# <tr>
# <td > W2 </td>
# <td > [[-0.55569196 0.0354055 1.32964895]]</td>
# </tr>
#
# <tr>
# <td > b2 </td>
# <td > [[-0.84610769]] </td>
# </tr>
# </table>
#
#
# ## 7 - Conclusion
#
# Congrats on implementing all the functions required for building a deep neural network!
#
# We know it was a long assignment but going forward it will only get better. The next part of the assignment is easier.
#
# In the next assignment you will put all these together to build two models:
# - A two-layer neural network
# - An L-layer neural network
#
# You will in fact use these models to classify cat vs non-cat images!
# In[ ]:
| gpl-3.0 |
pypot/scikit-learn | sklearn/linear_model/omp.py | 127 | 30417 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
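# Illustrative usage sketch (an expository addition, not part of the original
# scikit-learn module): build a random dictionary with unit-norm columns and a
# 3-sparse target vector, then recover it with orthogonal_mp as defined above.
# The sizes and support indices are arbitrary example values.
def _demo_orthogonal_mp():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 20)
    X /= np.sqrt(np.sum(X ** 2, axis=0))       # unit-norm columns, as the docstring assumes
    w_true = np.zeros(20)
    w_true[[3, 7, 15]] = [1.0, -2.0, 0.5]      # 3 non-zero coefficients
    y = np.dot(X, w_true)
    w_est = orthogonal_mp(X, y, n_nonzero_coefs=3)
    return w_true, w_est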
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
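# Illustrative sketch (again an expository addition, not original code): the
# same kind of recovery, but driven by a precomputed Gram matrix X.T * X and
# covariance vector X.T * y via orthogonal_mp_gram.
def _demo_orthogonal_mp_gram():
    rng = np.random.RandomState(1)
    X = rng.randn(40, 16)
    X /= np.sqrt(np.sum(X ** 2, axis=0))        # unit-norm columns
    w_true = np.zeros(16)
    w_true[[2, 9]] = [1.5, -0.75]
    y = np.dot(X, w_true)
    G = np.dot(X.T, X)                          # Gram matrix of the dictionary
    Xy = np.dot(X.T, y)                         # correlation of the dictionary with the target
    return orthogonal_mp_gram(G, Xy, n_nonzero_coefs=2)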
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
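# Expository sketch (not part of the original class definition): fit the
# OrthogonalMatchingPursuit estimator on synthetic data and read back the
# sparse coefficients, intercept and iteration count. Values are arbitrary.
def _demo_omp_estimator():
    rng = np.random.RandomState(2)
    X = rng.randn(60, 25)
    w_true = np.zeros(25)
    w_true[[1, 4, 20]] = [2.0, -1.0, 0.5]
    y = np.dot(X, w_true) + 0.01 * rng.randn(60)   # lightly noisy targets
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=3)
    omp.fit(X, y)
    return omp.coef_, omp.intercept_, omp.n_iter_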
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
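# Expository sketch (not original code): let OrthogonalMatchingPursuitCV pick
# the number of non-zero coefficients by cross-validation, using its default
# settings. The data shapes and true support are arbitrary example values.
def _demo_omp_cv():
    rng = np.random.RandomState(3)
    X = rng.randn(100, 30)
    w_true = np.zeros(30)
    w_true[[0, 11, 17, 23]] = [1.0, -0.5, 2.0, 0.25]
    y = np.dot(X, w_true) + 0.05 * rng.randn(100)
    omp_cv = OrthogonalMatchingPursuitCV()
    omp_cv.fit(X, y)
    return omp_cv.n_nonzero_coefs_, omp_cv.coef_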
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Interface_Mesh_Types/Interface_2/HardContact_NonLinHardSoftShear/Interface_Test_Normal_Plot.py | 30 | 2779 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', Linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Interface_Surface_Adding_axial_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', Linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig(outfigname, bbox_inches='tight')
# plt.show() | cc0-1.0 |
jhmatthews/cobra | source/emissiv_trend2.py | 1 | 6722 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
    University of Southampton -- JM -- 30 September 2013

        plot_emissiv.py

Synopsis:
    Plot macro atom level emissivities and other information from
    diag file

Usage:

Arguments:
'''
import matplotlib.pyplot as plt
import pylab as pl
import numpy as np
import sys
import os
import subprocess
import read_output as rd
from constants import *
t_init = 8000.0 # this is the temperatrue of the blackbody
imax = 20 # maximum steps we iterate over
i_temp_steps = 500 # step widths in temp space
MACRO = 7 # hardwire macro atom mode
# now we want a list of pywind cmds
# we want to make files
# we want electron density
# we want neutral hydrogen density and non neutral
# pywindcmds = [ 1, 'n', 't', 'i', 1, 1, 1, 1, 2, 0, 'q']
pywindcmds = [
1,
'n',
't',
'i',
1,
1,
1,
0,
'q',
]
# first write these commands to a file
inp = open('input_comms', 'w')
for command in pywindcmds:
inp.write('%s\n' % str(command))
inp.close()
rd.setpars() # set standard plotting parameters, e.g. tex
# set the band you want to plot emissivities over
# 3000-7000 will get the balmer continuum
wavemin = 3000.0
wavemax = 7000.0
# we read from a template file template.pf
print 'Reading template parameter file'
(keywords, values) = np.loadtxt('template.pf', dtype='string',
unpack=True)
# we read a number of arguments from the command line
# the user can decide to change certain keywords
for i in range(len(sys.argv)):
keyword = sys.argv[i]
for j in range(len(keywords)):
if keywords[j] == keyword:
old_value = values[j]
new_value = sys.argv[i + 1]
print 'Changing keyword %s from %s to %s' % (keyword,
old_value, new_value)
# get the line transfer mode so we know whether to get
mode = int (values[np.where(keywords == 'Line_transfer()')][0])
print mode[0]
if mode == MACRO: print 'Using macro atom line transfer'
print mode
# empty arrays to append to
hbeta_list = []
matom_emissivities = []
kpkt_emissivities = []
nh_list = []
h1_list = []
ne_list = []
t_e_list = []
hbeta_quantity = []
halpha_over = []
t_bb = []
mass_loss = float(sys.argv[2]) * 1.0e-19
print 'Mass loss rate is ', mass_loss
# now do the loop over temperature
for i in range(imax):
# obtain temperature of the star, what we ar eiterating on
tstar = t_init + i * i_temp_steps
# print some info for the user
print 'Starting cycle ' + str(i + 1) + ' of ' + str(imax)
print 'tstar = %lf' % tstar
# now we open a file for temporary pf, and write to it
inp = open('input.pf', 'w')
# cycle through keywords and write to file
for j in range(len(keywords)):
# if the keyword is tstar then we are iterating over it
if keywords[j] == 'tstar':
inp.write("tstar %lf\n" % tstar)
else:
# if not just write the value stored in the values array
inp.write('%s %s\n' % (keywords[j], values[j]))
# close file
inp.close()
# pf file created, let's run it using latest vers
os.system('py76c_dev input > output')
# run py_wind
os.system('py_wind input < input_comms > pywindout')
# now get the key values from the pywind files
root = 'input'
h1 = rd.thinshell_read(root + '.ioncH1.dat')
ne = rd.thinshell_read(root + '.ne.dat')
t_e = rd.thinshell_read(root + '.te.dat')
rootfolder = 'diag_input/'
convergence = rd.read_convergence(rootfolder + root)
print '\nconvergence fraction = %f' % convergence
# now append some important values to array
# we only want to append if the model has converged!
if mode == MACRO and convergence == 1.0:
(matom_emiss, kpkt_emiss) = rd.read_emissivity(rootfolder
+ root)
hbeta = matom_emiss[3]
nlevels_macro = len(matom_emiss)
n_array = np.arange(nlevels_macro)
hbeta_list.append(hbeta)
        hbeta_quantity.append(4.0 * PI * hbeta / (2.3e23 * ne ** 2))
halpha_over.append(matom_emiss[2] / hbeta)
matom_emissivities.append(matom_emiss)
kpkt_emissivities.append(kpkt_emiss)
if convergence == 1.0:
h1_list.append(h1)
ne_list.append(ne)
t_e_list.append(t_e)
t_bb.append(tstar)
if mode == MACRO and convergence == 1.0:
print '\nt_E %8.4e ne %8.4e hbeta %8.4e 4pi j /ne^2 %8.4e h1 %8.4e' \
% (t_e, ne, hbeta, 4.0 * PI * hbeta / (2.3e23 * ne ** 2),
h1)
print '\nt_E %8.4e ne %8.4e' % (t_e, ne)
# we need to normalise hbeta to be in units of erg cm^-3 s-1
hbeta_list = np.array(hbeta_list)
halpha_over = np.array(halpha_over)
ne_list = np.array(ne_list)
t_e_list = np.array(t_e_list)
h1_list = np.array(h1_list)
fig = plt.figure()
# order of things to plot
# t_bb = np.arange(t_init, t_init + (imax)*i_temp_steps, i_temp_steps)# this is the temperatrue of the blackbody
# pi_hbeta_over_nenep = ( 4.0*PI*hbeta_list / ne_list**2 ) / 1.3e+23
# arrays of the predicted values from Osterbrock
# first the array of temperatures Osterbrock gives
t_e_oster = np.arange(2500, 12500, 2500)
# now line intensities
oster_h_beta_absolute = np.array([2.7e-25, 1.54e-25, 8.30e-26,
4.21e-26]) # 4pi j_hbeta / ne **2 values
oster_h_alpha_relative = np.array([3.42, 3.10, 2.86, 2.69]) # ratios of halpha to hbeta from osterbrock
oster_h_gamma_relative = np.array([0.439, 0.458, 0.470, 0.485]) # ratios of hgamma to hbeta from osterbrock
oster_h_delta_relative = np.array([0.237, 0.25, 0.262, 0.271]) # ratios of hdelta to hbeta from osterbrock
# if we are in macro mode we want to plot lots of things
if mode == MACRO:
plot_list = [halpha_over, hbeta_quantity, hbeta_list, t_bb]
oster_list = [oster_h_beta_absolute, oster_h_gamma_relative,
oster_h_delta_relative]
ylabel = [r'$H \alpha / H \beta$', r'$4\pi j_{H \beta} / n_e^2$',
r'$H \beta$', '$T_{bb}$']
else:
plot_list = [ne_list, t_bb]
ylabel = ['$n_e$', '$T_{bb}$']
print t_e_list, plot_list[0]
# number of things to plot
n = len(plot_list)
for i in range(n):
ax = fig.add_subplot(n / 2, 2, i + 1)
print i
ax.plot(t_e_list, plot_list[i])
if i == 0:
ax.scatter(t_e_oster, oster_h_alpha_relative)
if i == 1:
ax.scatter(t_e_oster, oster_h_beta_absolute)
ax.set_ylabel(ylabel[i])
ax.set_xlabel('Electron Temp')
# finally, save the figures....
if mode == MACRO:
plt.savefig('macro.png')
else:
plt.savefig('nonmacro.png')
| gpl-2.0 |
monkeypants/MAVProxy | MAVProxy/mavproxy.py | 1 | 46772 | #!/usr/bin/env python
'''
mavproxy - a MAVLink proxy program
Copyright Andrew Tridgell 2011
Released under the GNU GPL version 3 or later
'''
import sys, os, time, socket, signal
import fnmatch, errno, threading
import serial, select
import traceback
import select
import shlex
import platform
import json
from imp import reload
try:
import queue as Queue
except ImportError:
import Queue
from builtins import input
from MAVProxy.modules.lib import textconsole
from MAVProxy.modules.lib import rline
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import dumpstacks
from MAVProxy.modules.lib import mp_substitute
from MAVProxy.modules.lib import multiproc
from MAVProxy.modules.mavproxy_link import preferred_ports
# adding all this allows pyinstaller to build a working windows executable
# note that using --hidden-import does not work for these modules
try:
multiproc.freeze_support()
from pymavlink import mavwp, mavutil
import matplotlib, HTMLParser
try:
import readline
except ImportError:
import pyreadline as readline
except Exception:
pass
if __name__ == '__main__':
multiproc.freeze_support()
#The MAVLink version being used (None, "1.0", "2.0")
mavversion = None
class MPStatus(object):
'''hold status information about the mavproxy'''
def __init__(self):
self.gps = None
self.msgs = {}
self.msg_count = {}
self.counters = {'MasterIn' : [], 'MasterOut' : 0, 'FGearIn' : 0, 'FGearOut' : 0, 'Slave' : 0}
self.setup_mode = opts.setup
self.mav_error = 0
self.altitude = 0
self.last_distance_announce = 0.0
self.exit = False
self.flightmode = 'MAV'
self.last_mode_announce = 0
self.last_mode_announced = 'MAV'
self.logdir = None
self.last_heartbeat = 0
self.last_message = 0
self.heartbeat_error = False
self.last_apm_msg = None
self.last_apm_msg_time = 0
self.highest_msec = 0
self.have_gps_lock = False
self.lost_gps_lock = False
self.last_gps_lock = 0
self.watch = None
self.last_streamrate1 = -1
self.last_streamrate2 = -1
self.last_seq = 0
self.armed = False
def show(self, f, pattern=None):
'''write status to status.txt'''
if pattern is None:
f.write('Counters: ')
for c in self.counters:
f.write('%s:%s ' % (c, self.counters[c]))
f.write('\n')
f.write('MAV Errors: %u\n' % self.mav_error)
f.write(str(self.gps)+'\n')
for m in sorted(self.msgs.keys()):
if pattern is not None and not fnmatch.fnmatch(str(m).upper(), pattern.upper()):
continue
f.write("%u: %s\n" % (self.msg_count[m], str(self.msgs[m])))
def write(self):
'''write status to status.txt'''
f = open('status.txt', mode='w')
self.show(f)
f.close()
def say_text(text, priority='important'):
'''text output - default function for say()'''
mpstate.console.writeln(text)
def say(text, priority='important'):
'''text and/or speech output'''
mpstate.functions.say(text, priority)
def add_input(cmd, immediate=False):
'''add some command input to be processed'''
if immediate:
process_stdin(cmd)
else:
mpstate.input_queue.put(cmd)
class MAVFunctions(object):
'''core functions available in modules'''
def __init__(self):
self.process_stdin = add_input
self.param_set = param_set
self.get_mav_param = get_mav_param
self.say = say_text
# input handler can be overridden by a module
self.input_handler = None
class MPState(object):
'''holds state of mavproxy'''
def __init__(self):
self.console = textconsole.SimpleConsole()
self.map = None
self.map_functions = {}
self.vehicle_type = None
self.vehicle_name = None
from MAVProxy.modules.lib.mp_settings import MPSettings, MPSetting
self.settings = MPSettings(
[ MPSetting('link', int, 1, 'Primary Link', tab='Link', range=(0,4), increment=1),
MPSetting('streamrate', int, 4, 'Stream rate link1', range=(-1,500), increment=1),
MPSetting('streamrate2', int, 4, 'Stream rate link2', range=(-1,500), increment=1),
MPSetting('heartbeat', int, 1, 'Heartbeat rate', range=(0,100), increment=1),
MPSetting('mavfwd', bool, True, 'Allow forwarded control'),
MPSetting('mavfwd_rate', bool, False, 'Allow forwarded rate control'),
MPSetting('shownoise', bool, True, 'Show non-MAVLink data'),
MPSetting('baudrate', int, opts.baudrate, 'baudrate for new links', range=(0,10000000), increment=1),
MPSetting('rtscts', bool, opts.rtscts, 'enable flow control'),
MPSetting('select_timeout', float, 0.01, 'select timeout'),
MPSetting('altreadout', int, 10, 'Altitude Readout',
range=(0,100), increment=1, tab='Announcements'),
MPSetting('distreadout', int, 200, 'Distance Readout', range=(0,10000), increment=1),
MPSetting('moddebug', int, opts.moddebug, 'Module Debug Level', range=(0,3), increment=1, tab='Debug'),
MPSetting('script_fatal', bool, False, 'fatal error on bad script', tab='Debug'),
MPSetting('compdebug', int, 0, 'Computation Debug Mask', range=(0,3), tab='Debug'),
MPSetting('flushlogs', bool, False, 'Flush logs on every packet'),
MPSetting('requireexit', bool, False, 'Require exit command'),
MPSetting('wpupdates', bool, True, 'Announce waypoint updates'),
MPSetting('basealt', int, 0, 'Base Altitude', range=(0,30000), increment=1, tab='Altitude'),
MPSetting('wpalt', int, 100, 'Default WP Altitude', range=(0,10000), increment=1),
MPSetting('rallyalt', int, 90, 'Default Rally Altitude', range=(0,10000), increment=1),
MPSetting('terrainalt', str, 'Auto', 'Use terrain altitudes', choice=['Auto','True','False']),
MPSetting('rally_breakalt', int, 40, 'Default Rally Break Altitude', range=(0,10000), increment=1),
MPSetting('rally_flags', int, 0, 'Default Rally Flags', range=(0,10000), increment=1),
MPSetting('source_system', int, 255, 'MAVLink Source system', range=(0,255), increment=1, tab='MAVLink'),
MPSetting('source_component', int, 0, 'MAVLink Source component', range=(0,255), increment=1),
MPSetting('target_system', int, 0, 'MAVLink target system', range=(0,255), increment=1),
MPSetting('target_component', int, 0, 'MAVLink target component', range=(0,255), increment=1),
MPSetting('state_basedir', str, None, 'base directory for logs and aircraft directories'),
MPSetting('allow_unsigned', bool, True, 'whether unsigned packets will be accepted'),
MPSetting('dist_unit', str, 'm', 'distance unit', choice=['m', 'nm', 'miles'], tab='Units'),
MPSetting('height_unit', str, 'm', 'height unit', choice=['m', 'feet']),
MPSetting('speed_unit', str, 'm/s', 'height unit', choice=['m/s', 'knots', 'mph']),
MPSetting('vehicle_name', str, '', 'Vehicle Name', tab='Vehicle'),
])
self.completions = {
"script" : ["(FILENAME)"],
"set" : ["(SETTING)"],
"status" : ["(VARIABLE)"],
"module" : ["list",
"load (AVAILMODULES)",
"<unload|reload> (LOADEDMODULES)"]
}
self.status = MPStatus()
# master mavlink device
self.mav_master = None
# mavlink outputs
self.mav_outputs = []
self.sysid_outputs = {}
# SITL output
self.sitl_output = None
self.mav_param_by_sysid = {}
self.mav_param_by_sysid[(self.settings.target_system,self.settings.target_component)] = mavparm.MAVParmDict()
self.modules = []
self.public_modules = {}
self.functions = MAVFunctions()
self.select_extra = {}
self.continue_mode = False
self.aliases = {}
import platform
self.system = platform.system()
self.multi_instance = {}
self.instance_count = {}
self.is_sitl = False
self.start_time_s = time.time()
self.attitude_time_s = 0
@property
def mav_param(self):
'''map mav_param onto the current target system parameters'''
compid = self.settings.target_component
if compid == 0:
compid = 1
sysid = (self.settings.target_system, compid)
if not sysid in self.mav_param_by_sysid:
self.mav_param_by_sysid[sysid] = mavparm.MAVParmDict()
return self.mav_param_by_sysid[sysid]
def module(self, name):
'''Find a public module (most modules are private)'''
if name in self.public_modules:
return self.public_modules[name]
return None
def master(self):
'''return the currently chosen mavlink master object'''
if len(self.mav_master) == 0:
return None
if self.settings.link > len(self.mav_master):
self.settings.link = 1
# try to use one with no link error
if not self.mav_master[self.settings.link-1].linkerror:
return self.mav_master[self.settings.link-1]
for m in self.mav_master:
if not m.linkerror:
return m
return self.mav_master[self.settings.link-1]
def get_mav_param(param, default=None):
'''return a EEPROM parameter value'''
return mpstate.mav_param.get(param, default)
def param_set(name, value, retries=3):
'''set a parameter'''
name = name.upper()
return mpstate.mav_param.mavset(mpstate.master(), name, value, retries=retries)
def cmd_script(args):
'''run a script'''
if len(args) < 1:
print("usage: script <filename>")
return
run_script(args[0])
def cmd_set(args):
'''control mavproxy options'''
mpstate.settings.command(args)
def cmd_status(args):
'''show status'''
if len(args) == 0:
mpstate.status.show(sys.stdout, pattern=None)
else:
for pattern in args:
mpstate.status.show(sys.stdout, pattern=pattern)
def cmd_setup(args):
mpstate.status.setup_mode = True
mpstate.rl.set_prompt("")
def cmd_reset(args):
print("Resetting master")
mpstate.master().reset()
def cmd_watch(args):
'''watch a mavlink packet pattern'''
if len(args) == 0:
mpstate.status.watch = None
return
mpstate.status.watch = args
print("Watching %s" % mpstate.status.watch)
def generate_kwargs(args):
kwargs = {}
module_components = args.split(":{", 1)
module_name = module_components[0]
if (len(module_components) == 2 and module_components[1].endswith("}")):
# assume json
try:
module_args = "{"+module_components[1]
kwargs = json.loads(module_args)
except ValueError as e:
print('Invalid JSON argument: {0} ({1})'.format(module_args,
repr(e)))
return (module_name, kwargs)
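# Expository sketch (not part of the original file): generate_kwargs splits a
# "module:{json}" argument into a module name and a keyword dict. The module
# name "map" and the "service" key below are purely illustrative values.
def _demo_generate_kwargs():
    # a plain module name carries no JSON payload, so kwargs is empty
    name, kwargs = generate_kwargs("console")
    assert (name, kwargs) == ("console", {})
    # a trailing JSON object is parsed into keyword arguments for the module
    name, kwargs = generate_kwargs('map:{"service": "MicrosoftHyb"}')
    assert name == "map" and kwargs == {"service": "MicrosoftHyb"}
    return name, kwargs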
def load_module(modname, quiet=False, **kwargs):
'''load a module'''
modpaths = ['MAVProxy.modules.mavproxy_%s' % modname, modname]
for (m,pm) in mpstate.modules:
if m.name == modname and not modname in mpstate.multi_instance:
if not quiet:
print("module %s already loaded" % modname)
# don't report an error
return True
ex = None
for modpath in modpaths:
try:
m = import_package(modpath)
reload(m)
module = m.init(mpstate, **kwargs)
if isinstance(module, mp_module.MPModule):
mpstate.modules.append((module, m))
if not quiet:
if kwargs:
print("Loaded module %s with kwargs = %s" % (modname, kwargs))
else:
print("Loaded module %s" % (modname,))
return True
else:
ex = "%s.init did not return a MPModule instance" % modname
break
except ImportError as msg:
ex = msg
if mpstate.settings.moddebug > 1:
import traceback
print(traceback.format_exc())
print("Failed to load module: %s. Use 'set moddebug 3' in the MAVProxy console to enable traceback" % ex)
return False
def unload_module(modname):
'''unload a module'''
for (m,pm) in mpstate.modules:
if m.name == modname:
if hasattr(m, 'unload'):
m.unload()
mpstate.modules.remove((m,pm))
print("Unloaded module %s" % modname)
return True
print("Unable to find module %s" % modname)
return False
def cmd_module(args):
'''module commands'''
usage = "usage: module <list|load|reload|unload>"
if len(args) < 1:
print(usage)
return
if args[0] == "list":
for (m,pm) in mpstate.modules:
print("%s: %s" % (m.name, m.description))
elif args[0] == "load":
if len(args) < 2:
print("usage: module load <name>")
return
(modname, kwargs) = generate_kwargs(args[1])
try:
load_module(modname, **kwargs)
except TypeError as ex:
print(ex)
print("%s module does not support keyword arguments"% modname)
return
elif args[0] == "reload":
if len(args) < 2:
print("usage: module reload <name>")
return
(modname, kwargs) = generate_kwargs(args[1])
pmodule = None
for (m,pm) in mpstate.modules:
if m.name == modname:
pmodule = pm
if pmodule is None:
print("Module %s not loaded" % modname)
return
if unload_module(modname):
import zipimport
try:
reload(pmodule)
except ImportError:
clear_zipimport_cache()
reload(pmodule)
try:
if load_module(modname, quiet=True, **kwargs):
print("Reloaded module %s" % modname)
except TypeError:
print("%s module does not support keyword arguments" % modname)
elif args[0] == "unload":
if len(args) < 2:
print("usage: module unload <name>")
return
modname = os.path.basename(args[1])
unload_module(modname)
else:
print(usage)
def cmd_alias(args):
'''alias commands'''
usage = "usage: alias <add|remove|list>"
if len(args) < 1 or args[0] == "list":
if len(args) >= 2:
wildcard = args[1].upper()
else:
wildcard = '*'
for a in sorted(mpstate.aliases.keys()):
if fnmatch.fnmatch(a.upper(), wildcard):
print("%-15s : %s" % (a, mpstate.aliases[a]))
elif args[0] == "add":
if len(args) < 3:
print(usage)
return
a = args[1]
mpstate.aliases[a] = ' '.join(args[2:])
elif args[0] == "remove":
if len(args) != 2:
print(usage)
return
a = args[1]
if a in mpstate.aliases:
mpstate.aliases.pop(a)
else:
print("no alias %s" % a)
else:
print(usage)
return
def clear_zipimport_cache():
"""Clear out cached entries from _zip_directory_cache.
See http://www.digi.com/wiki/developer/index.php/Error_messages"""
import sys, zipimport
syspath_backup = list(sys.path)
zipimport._zip_directory_cache.clear()
# load back items onto sys.path
sys.path = syspath_backup
# add this too: see https://mail.python.org/pipermail/python-list/2005-May/353229.html
sys.path_importer_cache.clear()
# http://stackoverflow.com/questions/211100/pythons-import-doesnt-work-as-expected
# has info on why this is necessary.
def import_package(name):
"""Given a package name like 'foo.bar.quux', imports the package
and returns the desired module."""
import zipimport
try:
mod = __import__(name)
except ImportError:
clear_zipimport_cache()
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
command_map = {
'script' : (cmd_script, 'run a script of MAVProxy commands'),
'setup' : (cmd_setup, 'go into setup mode'),
'reset' : (cmd_reset, 'reopen the connection to the MAVLink master'),
'status' : (cmd_status, 'show status'),
'set' : (cmd_set, 'mavproxy settings'),
'watch' : (cmd_watch, 'watch a MAVLink pattern'),
'module' : (cmd_module, 'module commands'),
'alias' : (cmd_alias, 'command aliases')
}
def shlex_quotes(value):
'''see http://stackoverflow.com/questions/6868382/python-shlex-split-ignore-single-quotes'''
lex = shlex.shlex(value)
lex.quotes = '"'
lex.whitespace_split = True
lex.commenters = ''
return list(lex)
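# Expository sketch (not original code): shlex_quotes treats only double
# quotes as quoting characters, so a double-quoted group survives as a single
# token while single quotes pass through literally.
def _demo_shlex_quotes():
    tokens = shlex_quotes('set vehicle_name "my plane"')
    # expect three tokens, with the quoted group kept together as one token
    return tokens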
def process_stdin(line):
'''handle commands from user'''
if line is None:
sys.exit(0)
# allow for modules to override input handling
if mpstate.functions.input_handler is not None:
mpstate.functions.input_handler(line)
return
line = line.strip()
if mpstate.status.setup_mode:
# in setup mode we send strings straight to the master
if line == '.':
mpstate.status.setup_mode = False
mpstate.status.flightmode = "MAV"
mpstate.rl.set_prompt("MAV> ")
return
if line != '+++':
line += '\r'
for c in line:
time.sleep(0.01)
mpstate.master().write(c)
return
if not line:
return
try:
args = shlex_quotes(line)
except Exception as e:
        print("Caught shlex exception: %s" % str(e))
return
cmd = args[0]
while cmd in mpstate.aliases:
line = mpstate.aliases[cmd]
args = shlex.split(line) + args[1:]
cmd = args[0]
if cmd == 'help':
        k = sorted(command_map.keys())
for cmd in k:
(fn, help) = command_map[cmd]
print("%-15s : %s" % (cmd, help))
return
if cmd == 'exit' and mpstate.settings.requireexit:
mpstate.status.exit = True
return
if not cmd in command_map:
for (m,pm) in mpstate.modules:
if hasattr(m, 'unknown_command'):
try:
if m.unknown_command(args):
return
except Exception as e:
print("ERROR in command: %s" % str(e))
print("Unknown command '%s'" % line)
return
(fn, help) = command_map[cmd]
try:
fn(args[1:])
except Exception as e:
print("ERROR in command %s: %s" % (args[1:], str(e)))
if mpstate.settings.moddebug > 1:
traceback.print_exc()
def process_master(m):
'''process packets from the MAVLink master'''
try:
s = m.recv(16*1024)
except Exception:
time.sleep(0.1)
return
# prevent a dead serial port from causing the CPU to spin. The user hitting enter will
# cause it to try and reconnect
if len(s) == 0:
time.sleep(0.1)
return
if (mpstate.settings.compdebug & 1) != 0:
return
if mpstate.logqueue_raw:
mpstate.logqueue_raw.put(bytearray(s))
if mpstate.status.setup_mode:
if mpstate.system == 'Windows':
# strip nsh ansi codes
s = s.replace("\033[K","")
sys.stdout.write(str(s))
sys.stdout.flush()
return
global mavversion
if m.first_byte and mavversion is None:
m.auto_mavlink_version(s)
msgs = m.mav.parse_buffer(s)
if msgs:
for msg in msgs:
sysid = msg.get_srcSystem()
if sysid in mpstate.sysid_outputs:
# the message has been handled by a specialised handler for this system
continue
if getattr(m, '_timestamp', None) is None:
m.post_message(msg)
if msg.get_type() == "BAD_DATA":
if opts.show_errors:
mpstate.console.writeln("MAV error: %s" % msg)
mpstate.status.mav_error += 1
def process_mavlink(slave):
'''process packets from MAVLink slaves, forwarding to the master'''
try:
buf = slave.recv()
except socket.error:
return
try:
global mavversion
if slave.first_byte and mavversion is None:
slave.auto_mavlink_version(buf)
msgs = slave.mav.parse_buffer(buf)
except mavutil.mavlink.MAVError as e:
        mpstate.console.error("Bad MAVLink slave message from %s: %s" % (slave.address, str(e)))
return
if msgs is None:
return
if mpstate.settings.mavfwd and not mpstate.status.setup_mode:
for m in msgs:
mpstate.master().write(m.get_msgbuf())
if mpstate.status.watch:
for msg_type in mpstate.status.watch:
if fnmatch.fnmatch(m.get_type().upper(), msg_type.upper()):
mpstate.console.writeln('> '+ str(m))
break
mpstate.status.counters['Slave'] += 1
def mkdir_p(dir):
'''like mkdir -p'''
if not dir:
return
if dir.endswith("/"):
mkdir_p(dir[:-1])
return
if os.path.isdir(dir):
return
mkdir_p(os.path.dirname(dir))
os.mkdir(dir)
def log_writer():
'''log writing thread'''
while True:
mpstate.logfile_raw.write(bytearray(mpstate.logqueue_raw.get()))
timeout = time.time() + 10
while not mpstate.logqueue_raw.empty() and time.time() < timeout:
mpstate.logfile_raw.write(mpstate.logqueue_raw.get())
while not mpstate.logqueue.empty() and time.time() < timeout:
mpstate.logfile.write(mpstate.logqueue.get())
if mpstate.settings.flushlogs or time.time() >= timeout:
mpstate.logfile.flush()
mpstate.logfile_raw.flush()
# If state_basedir is NOT set then paths for logs and aircraft
# directories are relative to mavproxy's cwd
def log_paths():
'''Returns tuple (logdir, telemetry_log_filepath, raw_telemetry_log_filepath)'''
if opts.aircraft is not None:
dirname = ""
if opts.mission is not None:
print(opts.mission)
dirname += "%s/logs/%s/Mission%s" % (opts.aircraft, time.strftime("%Y-%m-%d"), opts.mission)
else:
dirname += "%s/logs/%s" % (opts.aircraft, time.strftime("%Y-%m-%d"))
# dirname is currently relative. Possibly add state_basedir:
if mpstate.settings.state_basedir is not None:
dirname = os.path.join(mpstate.settings.state_basedir,dirname)
mkdir_p(dirname)
highest = None
for i in range(1, 10000):
fdir = os.path.join(dirname, 'flight%u' % i)
if not os.path.exists(fdir):
break
highest = fdir
if mpstate.continue_mode and highest is not None:
fdir = highest
elif os.path.exists(fdir):
print("Flight logs full")
sys.exit(1)
logname = 'flight.tlog'
logdir = fdir
else:
logname = os.path.basename(opts.logfile)
dir_path = os.path.dirname(opts.logfile)
if not os.path.isabs(dir_path) and mpstate.settings.state_basedir is not None:
dir_path = os.path.join(mpstate.settings.state_basedir,dir_path)
logdir = dir_path
mkdir_p(logdir)
return (logdir,
os.path.join(logdir, logname),
os.path.join(logdir, logname + '.raw'))
def open_telemetry_logs(logpath_telem, logpath_telem_raw):
'''open log files'''
if opts.append_log or opts.continue_mode:
mode = 'ab'
else:
mode = 'wb'
try:
mpstate.logfile = open(logpath_telem, mode=mode)
mpstate.logfile_raw = open(logpath_telem_raw, mode=mode)
print("Log Directory: %s" % mpstate.status.logdir)
print("Telemetry log: %s" % logpath_telem)
#make sure there's enough free disk space for the logfile (>200Mb)
#statvfs doesn't work in Windows
if platform.system() != 'Windows':
stat = os.statvfs(logpath_telem)
if stat.f_bfree*stat.f_bsize < 209715200:
print("ERROR: Not enough free disk space for logfile")
mpstate.status.exit = True
return
# use a separate thread for writing to the logfile to prevent
# delays during disk writes (important as delays can be long if camera
# app is running)
t = threading.Thread(target=log_writer, name='log_writer')
t.daemon = True
t.start()
except Exception as e:
print("ERROR: opening log file for writing: %s" % e)
mpstate.status.exit = True
return
def set_stream_rates():
'''set mavlink stream rates'''
if (not msg_period.trigger() and
mpstate.status.last_streamrate1 == mpstate.settings.streamrate and
mpstate.status.last_streamrate2 == mpstate.settings.streamrate2):
return
mpstate.status.last_streamrate1 = mpstate.settings.streamrate
mpstate.status.last_streamrate2 = mpstate.settings.streamrate2
for master in mpstate.mav_master:
if master.linknum == 0:
rate = mpstate.settings.streamrate
else:
rate = mpstate.settings.streamrate2
if rate != -1 and mpstate.settings.streamrate != -1:
master.mav.request_data_stream_send(mpstate.settings.target_system, mpstate.settings.target_component,
mavutil.mavlink.MAV_DATA_STREAM_ALL,
rate, 1)
def check_link_status():
'''check status of master links'''
tnow = time.time()
if mpstate.status.last_message != 0 and tnow > mpstate.status.last_message + 5:
say("no link")
mpstate.status.heartbeat_error = True
for master in mpstate.mav_master:
if not master.linkerror and (tnow > master.last_message + 5 or master.portdead):
say("link %s down" % (mp_module.MPModule.link_label(master)))
master.linkerror = True
def send_heartbeat(master):
if master.mavlink10():
master.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS, mavutil.mavlink.MAV_AUTOPILOT_INVALID,
0, 0, 0)
else:
MAV_GROUND = 5
MAV_AUTOPILOT_NONE = 4
master.mav.heartbeat_send(MAV_GROUND, MAV_AUTOPILOT_NONE)
def periodic_tasks():
'''run periodic checks'''
if mpstate.status.setup_mode:
return
if (mpstate.settings.compdebug & 2) != 0:
return
if mpstate.settings.heartbeat != 0:
heartbeat_period.frequency = mpstate.settings.heartbeat
if heartbeat_period.trigger() and mpstate.settings.heartbeat != 0:
mpstate.status.counters['MasterOut'] += 1
for master in mpstate.mav_master:
send_heartbeat(master)
if heartbeat_check_period.trigger():
check_link_status()
set_stream_rates()
# call optional module idle tasks. These are called at several hundred Hz
for (m,pm) in mpstate.modules:
if hasattr(m, 'idle_task'):
try:
m.idle_task()
except Exception as msg:
if mpstate.settings.moddebug == 1:
print(msg)
elif mpstate.settings.moddebug > 1:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
# also see if the module should be unloaded:
if m.needs_unloading:
unload_module(m.name)
def main_loop():
'''main processing loop'''
if not mpstate.status.setup_mode and not opts.nowait:
for master in mpstate.mav_master:
if master.linknum != 0:
break
print("Waiting for heartbeat from %s" % master.address)
send_heartbeat(master)
master.wait_heartbeat(timeout=0.1)
set_stream_rates()
while True:
if mpstate is None or mpstate.status.exit:
return
while not mpstate.input_queue.empty():
line = mpstate.input_queue.get()
mpstate.input_count += 1
cmds = line.split(';')
if len(cmds) == 1 and cmds[0] == "":
mpstate.empty_input_count += 1
for c in cmds:
process_stdin(c)
for master in mpstate.mav_master:
if master.fd is None:
if master.port.inWaiting() > 0:
process_master(master)
periodic_tasks()
rin = []
for master in mpstate.mav_master:
if master.fd is not None and not master.portdead:
rin.append(master.fd)
for m in mpstate.mav_outputs:
rin.append(m.fd)
for sysid in mpstate.sysid_outputs:
m = mpstate.sysid_outputs[sysid]
rin.append(m.fd)
if rin == []:
time.sleep(0.0001)
continue
for fd in mpstate.select_extra:
rin.append(fd)
try:
(rin, win, xin) = select.select(rin, [], [], mpstate.settings.select_timeout)
except select.error:
continue
if mpstate is None:
return
for fd in rin:
if mpstate is None:
return
for master in mpstate.mav_master:
if fd == master.fd:
process_master(master)
if mpstate is None:
return
continue
for m in mpstate.mav_outputs:
if fd == m.fd:
process_mavlink(m)
if mpstate is None:
return
continue
for sysid in mpstate.sysid_outputs:
m = mpstate.sysid_outputs[sysid]
if fd == m.fd:
process_mavlink(m)
if mpstate is None:
return
continue
# this allow modules to register their own file descriptors
# for the main select loop
if fd in mpstate.select_extra:
try:
# call the registered read function
(fn, args) = mpstate.select_extra[fd]
fn(args)
except Exception as msg:
if mpstate.settings.moddebug == 1:
print(msg)
# on an exception, remove it from the select list
mpstate.select_extra.pop(fd)
def input_loop():
'''wait for user input'''
while mpstate.status.exit != True:
try:
if mpstate.status.exit != True:
line = input(mpstate.rl.prompt)
except EOFError:
mpstate.status.exit = True
sys.exit(1)
mpstate.input_queue.put(line)
def run_script(scriptfile):
'''run a script file'''
try:
f = open(scriptfile, mode='r')
except Exception:
return
mpstate.console.writeln("Running script %s" % scriptfile)
sub = mp_substitute.MAVSubstitute()
for line in f:
line = line.strip()
if line == "" or line.startswith('#'):
continue
try:
line = sub.substitute(line, os.environ)
except mp_substitute.MAVSubstituteError as ex:
print("Bad variable: %s" % str(ex))
if mpstate.settings.script_fatal:
sys.exit(1)
continue
if line.startswith('@'):
line = line[1:]
else:
mpstate.console.writeln("-> %s" % line)
process_stdin(line)
f.close()
def set_mav_version(mav10, mav20, autoProtocol, mavversionArg):
'''Set the Mavlink version based on commandline options'''
# if(mav10 == True or mav20 == True or autoProtocol == True):
# print("Warning: Using deprecated --mav10, --mav20 or --auto-protocol options. Use --mavversion instead")
#sanity check the options
if (mav10 == True or mav20 == True) and autoProtocol == True:
print("Error: Can't have [--mav10, --mav20] and --auto-protocol both True")
sys.exit(1)
if mav10 == True and mav20 == True:
print("Error: Can't have --mav10 and --mav20 both True")
sys.exit(1)
if mavversionArg is not None and (mav10 == True or mav20 == True or autoProtocol == True):
print("Error: Can't use --mavversion with legacy (--mav10, --mav20 or --auto-protocol) options")
sys.exit(1)
#and set the specific mavlink version (False = autodetect)
global mavversion
if mavversionArg == "1.0" or mav10 == True:
os.environ['MAVLINK09'] = '1'
mavversion = "1"
else:
os.environ['MAVLINK20'] = '1'
mavversion = "2"
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser("mavproxy.py [options]")
parser.add_option("--master", dest="master", action='append',
metavar="DEVICE[,BAUD]", help="MAVLink master port and optional baud rate",
default=[])
parser.add_option("", "--force-connected", dest="force_connected", help="Use master even if initial connection fails",
action='store_true', default=False)
parser.add_option("--out", dest="output", action='append',
metavar="DEVICE[,BAUD]", help="MAVLink output port and optional baud rate",
default=[])
parser.add_option("--baudrate", dest="baudrate", type='int',
help="default serial baud rate", default=57600)
parser.add_option("--sitl", dest="sitl", default=None, help="SITL output port")
parser.add_option("--streamrate",dest="streamrate", default=4, type='int',
help="MAVLink stream rate")
parser.add_option("--source-system", dest='SOURCE_SYSTEM', type='int',
default=255, help='MAVLink source system for this GCS')
parser.add_option("--source-component", dest='SOURCE_COMPONENT', type='int',
default=0, help='MAVLink source component for this GCS')
parser.add_option("--target-system", dest='TARGET_SYSTEM', type='int',
default=0, help='MAVLink target master system')
parser.add_option("--target-component", dest='TARGET_COMPONENT', type='int',
default=0, help='MAVLink target master component')
parser.add_option("--logfile", dest="logfile", help="MAVLink master logfile",
default='mav.tlog')
parser.add_option("-a", "--append-log", dest="append_log", help="Append to log files",
action='store_true', default=False)
parser.add_option("--quadcopter", dest="quadcopter", help="use quadcopter controls",
action='store_true', default=False)
parser.add_option("--setup", dest="setup", help="start in setup mode",
action='store_true', default=False)
parser.add_option("--nodtr", dest="nodtr", help="disable DTR drop on close",
action='store_true', default=False)
parser.add_option("--show-errors", dest="show_errors", help="show MAVLink error packets",
action='store_true', default=False)
parser.add_option("--speech", dest="speech", help="use text to speech",
action='store_true', default=False)
parser.add_option("--aircraft", dest="aircraft", help="aircraft name", default=None)
parser.add_option("--cmd", dest="cmd", help="initial commands", default=None, action='append')
parser.add_option("--console", action='store_true', help="use GUI console")
parser.add_option("--map", action='store_true', help="load map module")
parser.add_option(
'--load-module',
action='append',
default=[],
help='Load the specified module. Can be used multiple times, or with a comma separated list')
parser.add_option("--mav10", action='store_true', default=False, help="Use MAVLink protocol 1.0")
parser.add_option("--mav20", action='store_true', default=False, help="Use MAVLink protocol 2.0")
parser.add_option("--auto-protocol", action='store_true', default=False, help="Auto detect MAVLink protocol version")
parser.add_option("--mavversion", type='choice', choices=['1.0', '2.0'] , help="Force MAVLink Version (1.0, 2.0). Otherwise autodetect version")
parser.add_option("--nowait", action='store_true', default=False, help="don't wait for HEARTBEAT on startup")
parser.add_option("-c", "--continue", dest='continue_mode', action='store_true', default=False, help="continue logs")
parser.add_option("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_option("--rtscts", action='store_true', help="enable hardware RTS/CTS flow control")
parser.add_option("--moddebug", type=int, help="module debug level", default=0)
parser.add_option("--mission", dest="mission", help="mission name", default=None)
parser.add_option("--daemon", action='store_true', help="run in daemon mode, do not start interactive shell")
parser.add_option("--non-interactive", action='store_true', help="do not start interactive shell")
parser.add_option("--profile", action='store_true', help="run the Yappi python profiler")
parser.add_option("--state-basedir", default=None, help="base directory for logs and aircraft directories")
parser.add_option("--version", action='store_true', help="version information")
parser.add_option("--default-modules", default="log,signing,wp,rally,fence,param,relay,tuneopt,arm,mode,calibration,rc,auxopt,misc,cmdlong,battery,terrain,output,adsb,layout", help='default module list')
(opts, args) = parser.parse_args()
if len(args) != 0:
print("ERROR: mavproxy takes no position arguments; got (%s)" % str(args))
sys.exit(1)
# warn people about ModemManager which interferes badly with APM and Pixhawk
if os.path.exists("/usr/sbin/ModemManager"):
print("WARNING: You should uninstall ModemManager as it conflicts with APM and Pixhawk")
#set the Mavlink version, if required
set_mav_version(opts.mav10, opts.mav20, opts.auto_protocol, opts.mavversion)
from pymavlink import mavutil, mavparm
mavutil.set_dialect(opts.dialect)
#version information
if opts.version:
#pkg_resources doesn't work in the windows exe build, so read the version file
try:
import pkg_resources
version = pkg_resources.require("mavproxy")[0].version
except:
start_script = os.path.join(os.environ['LOCALAPPDATA'], "MAVProxy", "version.txt")
f = open(start_script, 'r')
version = f.readline()
print("MAVProxy is a modular ground station using the mavlink protocol")
print("MAVProxy Version: " + version)
sys.exit(1)
# global mavproxy state
mpstate = MPState()
mpstate.status.exit = False
mpstate.command_map = command_map
mpstate.continue_mode = opts.continue_mode
# queues for logging
mpstate.logqueue = Queue.Queue()
mpstate.logqueue_raw = Queue.Queue()
if opts.speech:
# start the speech-dispatcher early, so it doesn't inherit any ports from
# modules/mavutil
load_module('speech')
serial_list = mavutil.auto_detect_serial(preferred_list=preferred_ports)
if not opts.master:
print('Auto-detected serial ports are:')
for port in serial_list:
print("%s" % port)
# container for status information
mpstate.settings.target_system = opts.TARGET_SYSTEM
mpstate.settings.target_component = opts.TARGET_COMPONENT
mpstate.mav_master = []
mpstate.rl = rline.rline("MAV> ", mpstate)
def quit_handler(signum = None, frame = None):
#print('Signal handler called with signal', signum)
if mpstate.status.exit:
print('Clean shutdown impossible, forcing an exit')
sys.exit(0)
else:
mpstate.status.exit = True
# Listen for kill signals to cleanly shutdown modules
fatalsignals = [signal.SIGTERM]
try:
fatalsignals.append(signal.SIGHUP)
fatalsignals.append(signal.SIGQUIT)
except Exception:
pass
if opts.daemon or opts.non_interactive: # SIGINT breaks readline parsing - if we are interactive, just let things die
fatalsignals.append(signal.SIGINT)
for sig in fatalsignals:
signal.signal(sig, quit_handler)
load_module('link', quiet=True)
mpstate.settings.source_system = opts.SOURCE_SYSTEM
mpstate.settings.source_component = opts.SOURCE_COMPONENT
# open master link
for mdev in opts.master:
if not mpstate.module('link').link_add(mdev, force_connected=opts.force_connected):
sys.exit(1)
if not opts.master and len(serial_list) == 1:
print("Connecting to %s" % serial_list[0])
mpstate.module('link').link_add(serial_list[0].device)
elif not opts.master and len(serial_list) > 1:
print("Error: multiple possible serial ports; use --master to select a single port")
sys.exit(1)
elif not opts.master:
wifi_device = '0.0.0.0:14550'
mpstate.module('link').link_add(wifi_device)
# open any mavlink output ports
for port in opts.output:
mpstate.mav_outputs.append(mavutil.mavlink_connection(port, baud=int(opts.baudrate), input=False))
if opts.sitl:
mpstate.sitl_output = mavutil.mavudp(opts.sitl, input=False)
mpstate.settings.streamrate = opts.streamrate
mpstate.settings.streamrate2 = opts.streamrate
if opts.state_basedir is not None:
mpstate.settings.state_basedir = opts.state_basedir
msg_period = mavutil.periodic_event(1.0/15)
heartbeat_period = mavutil.periodic_event(1)
heartbeat_check_period = mavutil.periodic_event(0.33)
mpstate.input_queue = Queue.Queue()
mpstate.input_count = 0
mpstate.empty_input_count = 0
if opts.setup:
mpstate.rl.set_prompt("")
# call this early so that logdir is setup based on --aircraft
(mpstate.status.logdir, logpath_telem, logpath_telem_raw) = log_paths()
for module in opts.load_module:
modlist = module.split(',')
for mod in modlist:
process_stdin('module load %s' % (mod))
if not opts.setup:
# some core functionality is in modules
standard_modules = opts.default_modules.split(',')
for m in standard_modules:
load_module(m, quiet=True)
if opts.console:
process_stdin('module load console')
if opts.map:
process_stdin('module load map')
start_scripts = []
if 'HOME' in os.environ and not opts.setup:
start_script = os.path.join(os.environ['HOME'], ".mavinit.scr")
start_scripts.append(start_script)
if 'LOCALAPPDATA' in os.environ and not opts.setup:
start_script = os.path.join(os.environ['LOCALAPPDATA'], "MAVProxy", "mavinit.scr")
start_scripts.append(start_script)
if (mpstate.settings.state_basedir is not None and
opts.aircraft is not None):
start_script = os.path.join(mpstate.settings.state_basedir, opts.aircraft, "mavinit.scr")
start_scripts.append(start_script)
for start_script in start_scripts:
if os.path.exists(start_script):
print("Running script (%s)" % (start_script))
run_script(start_script)
if opts.aircraft is not None:
start_script = os.path.join(opts.aircraft, "mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
else:
print("no script %s" % start_script)
if opts.cmd is not None:
for cstr in opts.cmd:
cmds = cstr.split(';')
for c in cmds:
process_stdin(c)
if opts.profile:
import yappi # We do the import here so that we won't barf if run normally and yappi not available
yappi.start()
# log all packets from the master, for later replay
open_telemetry_logs(logpath_telem, logpath_telem_raw)
# run main loop as a thread
mpstate.status.thread = threading.Thread(target=main_loop, name='main_loop')
mpstate.status.thread.daemon = True
mpstate.status.thread.start()
# use main program for input. This ensures the terminal cleans
# up on exit
while (mpstate.status.exit != True):
try:
if opts.daemon or opts.non_interactive:
time.sleep(0.1)
else:
input_loop()
except KeyboardInterrupt:
if mpstate.settings.requireexit:
print("Interrupt caught. Use 'exit' to quit MAVProxy.")
#Just lost the map and console, get them back:
for (m,pm) in mpstate.modules:
if m.name in ["map", "console"]:
if hasattr(m, 'unload'):
try:
m.unload()
except Exception:
pass
reload(m)
m.init(mpstate)
else:
mpstate.status.exit = True
sys.exit(1)
if opts.profile:
yappi.get_func_stats().print_all()
yappi.get_thread_stats().print_all()
#this loop executes after leaving the above loop and is for cleanup on exit
for (m,pm) in mpstate.modules:
if hasattr(m, 'unload'):
print("Unloading module %s" % m.name)
m.unload()
sys.exit(1)
| gpl-3.0 |
start-jsk/jsk_apc | demos/instance_occlsegm/instance_occlsegm_lib/_io.py | 4 | 4202 | import math
import os
import os.path as osp
import warnings
import cv2
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from mpl_toolkits.mplot3d import Axes3D # NOQA
import numpy as np
import PIL.Image
import skimage.io
def load_off(filename):
"""Load OFF file.
Parameters
----------
filename: str
OFF filename.
"""
with open(filename, 'r') as f:
assert 'OFF' in f.readline()
verts, faces = [], []
n_verts, n_faces = None, None
for line in f.readlines():
line = line.strip()
if line.startswith('#') or not line:
continue
if n_verts is None and n_faces is None:
n_verts, n_faces, _ = map(int, line.split(' '))
elif len(verts) < n_verts:
verts.append([float(v) for v in line.split(' ')])
else:
faces.append([int(v) for v in line.split(' ')[1:]])
verts = np.array(verts, dtype=np.float64)
faces = np.array(faces, dtype=np.int64)
return verts, faces
def load_vtk(filename):
points = None
with open(filename) as f:
state = None
index = 0
for line in f:
if line.startswith('POINTS '):
_, n_points, dtype = line.split()
n_points = int(n_points)
points = np.empty((n_points, 3), dtype=dtype)
state = 'POINTS'
continue
elif line.startswith('VERTICES '):
warnings.warn('VERTICES is not currently supported.')
_, _, n_vertices = line.split()
n_vertices = int(n_vertices)
# vertices = np.empty((n_vertices, 2), dtype=np.int64)
state = 'VERTICES'
continue
if state == 'POINTS':
x, y, z = line.split()
xyz = np.array([x, y, z], dtype=np.float64)
points[index] = xyz
index += 1
elif state == 'VERTICES':
# TODO(wkentaro): support this.
pass
return points
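# For reference, a minimal legacy-ASCII VTK fragment that the parser above can
# consume looks like this (illustrative; only the POINTS block is read, and a
# VERTICES block is skipped with a warning):
#
#   POINTS 2 float
#   0.0 0.0 0.0
#   1.0 2.0 3.0
#   VERTICES 2 4
#   1 0
#   1 1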
def dump_off(filename, verts, faces):
with open(filename, 'w') as f:
f.write('OFF\n')
n_vert = len(verts)
n_face = len(faces)
n_edge = 0
f.write('{} {} {}\n'.format(n_vert, n_face, n_edge))
for vert in verts:
f.write(' '.join(map(str, vert)) + '\n')
for face in faces:
            f.write(' '.join([str(len(face))] + list(map(str, face))) + '\n')
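# A minimal round-trip sketch for the OFF helpers above (hypothetical usage, not
# part of the original module): write a single triangle with dump_off and read
# it back with load_off.
def _off_roundtrip_example():
    import tempfile
    verts = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
    faces = [[0, 1, 2]]
    tmp = tempfile.NamedTemporaryFile(suffix='.off', delete=False)
    tmp.close()
    dump_off(tmp.name, verts, faces)
    loaded_verts, loaded_faces = load_off(tmp.name)
    os.remove(tmp.name)
    # expected shapes: (3, 3) vertices and (1, 3) face indices
    return loaded_verts.shape, loaded_faces.shape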
def dump_obj(filename, verts, faces):
"""Dump mesh data to obj file."""
with open(filename, 'w') as f:
# write vertices
f.write('g\n# %d vertex\n' % len(verts))
for vert in verts:
f.write('v %f %f %f\n' % tuple(vert))
# write faces
f.write('# %d faces\n' % len(faces))
for face in faces:
f.write('f %d %d %d\n' % tuple(face))
def _get_tile_shape(num):
x_num = int(math.sqrt(num))
y_num = 0
while x_num * y_num < num:
y_num += 1
return x_num, y_num
def imgplot(img, title=None):
if img.ndim == 2:
if np.issubdtype(img.dtype, np.floating):
cmap = 'jet'
else:
cmap = 'gray'
plt.imshow(img, cmap=cmap)
else:
plt.imshow(img)
if title is not None:
plt.title(title)
def show():
return plt.show()
def imshow(img, window_name=None):
if img.ndim == 3:
img = img[:, :, ::-1]
return cv2.imshow(window_name, img)
def imread(*args, **kwargs):
return skimage.io.imread(*args, **kwargs)
def lbread(lbl_file):
return np.asarray(PIL.Image.open(lbl_file)).astype(np.int32)
def imsave(*args, **kwargs):
if len(args) >= 1:
fname = args[0]
else:
fname = kwargs['fname']
dirname = osp.dirname(fname)
if dirname and not osp.exists(dirname):
os.makedirs(dirname)
return skimage.io.imsave(*args, **kwargs)
def waitkey(time=0):
return cv2.waitKey(time)
def waitKey(time=0):
warnings.warn('`waitKey` is deprecated. Please use `waitkey` instead.')
return waitkey(time=time)
| bsd-3-clause |
spennihana/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_link_functions_gaussian_glm.py | 8 | 1710 | from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import pandas as pd
import zipfile
import statsmodels.api as sm
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def link_functions_gaussian():
print("Read in prostate data.")
h2o_data = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip"))
h2o_data.head()
sm_data = pd.read_csv(zipfile.ZipFile(pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip")).
open("prostate_complete.csv")).as_matrix()
sm_data_response = sm_data[:,9]
sm_data_features = sm_data[:,1:9]
print("Testing for family: GAUSSIAN")
print("Set variables for h2o.")
myY = "GLEASON"
myX = ["ID","AGE","RACE","CAPSULE","DCAPS","PSA","VOL","DPROS"]
print("Create models with canonical link: IDENTITY")
h2o_model = H2OGeneralizedLinearEstimator(family="gaussian", link="identity",alpha=0.5, Lambda=0)
h2o_model.train(x=myX, y=myY, training_frame=h2o_data)
sm_model = sm.GLM(endog=sm_data_response, exog=sm_data_features,
family=sm.families.Gaussian(sm.families.links.identity)).fit()
print("Compare model deviances for link function identity")
h2o_deviance = old_div(h2o_model.residual_deviance(), h2o_model.null_deviance())
sm_deviance = old_div(sm_model.deviance, sm_model.null_deviance)
assert h2o_deviance - sm_deviance < 0.01, "expected h2o to have an equivalent or better deviance measures"
if __name__ == "__main__":
pyunit_utils.standalone_test(link_functions_gaussian)
else:
link_functions_gaussian()
| apache-2.0 |
hsuantien/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
IvS-KULeuven/ComboCode | cc/ivs/sigproc/funclib.py | 3 | 17319 | """
Database with model functions.
To be used with the L{cc.ivs.sigproc.fit.minimizer} function or with the L{evaluate}
function in this module.
>>> p = plt.figure()
>>> x = np.linspace(-10,10,1000)
>>> p = plt.plot(x,evaluate('gauss',x,[5,1.,2.,0.5]),label='gauss')
>>> p = plt.plot(x,evaluate('voigt',x,[20.,1.,1.5,3.,0.5]),label='voigt')
>>> p = plt.plot(x,evaluate('lorentz',x,[5,1.,2.,0.5]),label='lorentz')
>>> leg = plt.legend(loc='best')
>>> leg.get_frame().set_alpha(0.5)
]include figure]]ivs_sigproc_fit_funclib01.png]
>>> p = plt.figure()
>>> x = np.linspace(0,10,1000)[1:]
>>> p = plt.plot(x,evaluate('power_law',x,[2.,3.,1.5,0,0.5]),label='power_law')
>>> p = plt.plot(x,evaluate('power_law',x,[2.,3.,1.5,0,0.5])+evaluate('gauss',x,[1.,5.,0.5,0,0]),label='power_law + gauss')
>>> leg = plt.legend(loc='best')
>>> leg.get_frame().set_alpha(0.5)
]include figure]]ivs_sigproc_fit_funclib02.png]
>>> p = plt.figure()
>>> x = np.linspace(0,10,1000)
>>> p = plt.plot(x,evaluate('sine',x,[1.,2.,0,0]),label='sine')
>>> p = plt.plot(x,evaluate('sine_linfreqshift',x,[1.,0.5,0,0,.5]),label='sine_linfreqshift')
>>> p = plt.plot(x,evaluate('sine_expfreqshift',x,[1.,0.5,0,0,1.2]),label='sine_expfreqshift')
>>> leg = plt.legend(loc='best')
>>> leg.get_frame().set_alpha(0.5)
]include figure]]ivs_sigproc_fit_funclib03.png]
>>> p = plt.figure()
>>> p = plt.plot(x,evaluate('sine',x,[1.,2.,0,0]),label='sine')
>>> p = plt.plot(x,evaluate('sine_orbit',x,[1.,2.,0,0,0.1,10.,0.1]),label='sine_orbit')
>>> leg = plt.legend(loc='best')
>>> leg.get_frame().set_alpha(0.5)
]include figure]]ivs_sigproc_fit_funclib03a.png]
>>> p = plt.figure()
>>> x_single = np.linspace(0,10,1000)
>>> x_double = np.vstack([x_single,x_single])
>>> p = plt.plot(x_single,evaluate('kepler_orbit',x_single,[2.5,0.,0.5,0,3,1.]),label='kepler_orbit (single)')
>>> y_double = evaluate('kepler_orbit',x_double,[2.5,0.,0.5,0,3,2.,-4,2.],type='double')
>>> p = plt.plot(x_double[0],y_double[0],label='kepler_orbit (double 1)')
>>> p = plt.plot(x_double[1],y_double[1],label='kepler_orbit (double 2)')
>>> p = plt.plot(x,evaluate('box_transit',x,[2.,0.4,0.1,0.3,0.5]),label='box_transit')
>>> leg = plt.legend(loc='best')
>>> leg.get_frame().set_alpha(0.5)
]include figure]]ivs_sigproc_fit_funclib04.png]
>>> p = plt.figure()
>>> x = np.linspace(-1,1,1000)
>>> gammas = [-0.25,0.1,0.25,0.5,1,2,4]
>>> y = np.array([evaluate('soft_parabola',x,[1.,0,1.,gamma]) for gamma in gammas])
divide by zero encountered in power
>>> for iy,gamma in zip(y,gammas): p = plt.plot(x,iy,label="soft_parabola $\gamma$={:.2f}".format(gamma))
>>> leg = plt.legend(loc='best')
>>> leg.get_frame().set_alpha(0.5)
]include figure]]ivs_sigproc_fit_funclib05.png]
>>> p = plt.figure()
>>> x = np.logspace(-1,2,1000)
>>> blbo = evaluate('blackbody',x,[10000.,1.],wave_units='micron',flux_units='W/m3')
>>> raje = evaluate('rayleigh_jeans',x,[10000.,1.],wave_units='micron',flux_units='W/m3')
>>> wien = evaluate('wien',x,[10000.,1.],wave_units='micron',flux_units='W/m3')
>>> p = plt.subplot(221)
>>> p = plt.title(r'$\lambda$ vs $F_\lambda$')
>>> p = plt.loglog(x,blbo,label='Black Body')
>>> p = plt.loglog(x,raje,label='Rayleigh-Jeans')
>>> p = plt.loglog(x,wien,label='Wien')
>>> leg = plt.legend(loc='best')
>>> leg.get_frame().set_alpha(0.5)
>>> blbo = evaluate('blackbody',x,[10000.,1.],wave_units='micron',flux_units='Jy')
>>> raje = evaluate('rayleigh_jeans',x,[10000.,1.],wave_units='micron',flux_units='Jy')
>>> wien = evaluate('wien',x,[10000.,1.],wave_units='micron',flux_units='Jy')
>>> p = plt.subplot(223)
>>> p = plt.title(r"$\lambda$ vs $F_\\nu$")
>>> p = plt.loglog(x,blbo,label='Black Body')
>>> p = plt.loglog(x,raje,label='Rayleigh-Jeans')
>>> p = plt.loglog(x,wien,label='Wien')
>>> leg = plt.legend(loc='best')
>>> leg.get_frame().set_alpha(0.5)
>>> x = np.logspace(0.47,3.47,1000)
>>> blbo = evaluate('blackbody',x,[10000.,1.],wave_units='THz',flux_units='Jy')
>>> raje = evaluate('rayleigh_jeans',x,[10000.,1.],wave_units='THz',flux_units='Jy')
>>> wien = evaluate('wien',x,[10000.,1.],wave_units='THz',flux_units='Jy')
>>> p = plt.subplot(224)
>>> p = plt.title(r"$\\nu$ vs $F_\\nu$")
>>> p = plt.loglog(x,blbo,label='Black Body')
>>> p = plt.loglog(x,raje,label='Rayleigh-Jeans')
>>> p = plt.loglog(x,wien,label='Wien')
>>> leg = plt.legend(loc='best')
>>> leg.get_frame().set_alpha(0.5)
>>> blbo = evaluate('blackbody',x,[10000.,1.],wave_units='THz',flux_units='W/m3')
>>> raje = evaluate('rayleigh_jeans',x,[10000.,1.],wave_units='THz',flux_units='W/m3')
>>> wien = evaluate('wien',x,[10000.,1.],wave_units='THz',flux_units='W/m3')
>>> p = plt.subplot(222)
>>> p = plt.title(r"$\\nu$ vs $F_\lambda$")
>>> p = plt.loglog(x,blbo,label='Black Body')
>>> p = plt.loglog(x,raje,label='Rayleigh-Jeans')
>>> p = plt.loglog(x,wien,label='Wien')
>>> leg = plt.legend(loc='best')
>>> leg.get_frame().set_alpha(0.5)
]include figure]]ivs_sigproc_fit_funclib06.png]
"""
import numpy as np
from numpy import pi,cos,sin,sqrt,tan,arctan
from scipy.special import erf,jn
from cc.ivs.sigproc.fit import Model, Function
#import ivs.timeseries.keplerorbit as kepler
from cc.ivs.sed import model as sed_model
import logging
logger = logging.getLogger("SP.FUNCLIB")
#{ Function Library
def blackbody(wave_units='AA',flux_units='erg/s/cm2/AA',disc_integrated=True):
"""
Blackbody (T, scale).
@param wave_units: wavelength units
@type wave_units: string
@param flux_units: flux units
@type flux_units: string
@param disc_integrated: sets units equal to SED models
@type disc_integrated: bool
"""
pnames = ['T', 'scale']
function = lambda p, x: p[1]*sed_model.blackbody(x, p[0], wave_units=wave_units,\
flux_units=flux_units,\
disc_integrated=disc_integrated)
function.__name__ = 'blackbody'
return Function(function=function, par_names=pnames)
def rayleigh_jeans(wave_units='AA',flux_units='erg/s/cm2/AA',disc_integrated=True):
"""
Rayleigh-Jeans tail (T, scale).
@param wave_units: wavelength units
@type wave_units: string
@param flux_units: flux units
@type flux_units: string
@param disc_integrated: sets units equal to SED models
@type disc_integrated: bool
"""
pnames = ['T', 'scale']
function = lambda p, x: p[1]*sed_model.rayleigh_jeans(x, p[0], wave_units=wave_units,\
flux_units=flux_units,\
disc_integrated=disc_integrated)
function.__name__ = 'rayleigh_jeans'
return Function(function=function, par_names=pnames)
def wien(wave_units='AA',flux_units='erg/s/cm2/AA',disc_integrated=True):
"""
Wien approximation (T, scale).
@param wave_units: wavelength units
@type wave_units: string
@param flux_units: flux units
@type flux_units: string
@param disc_integrated: sets units equal to SED models
@type disc_integrated: bool
"""
pnames = ['T', 'scale']
function = lambda p, x: p[1]*sed_model.wien(x, p[0], wave_units=wave_units,\
flux_units=flux_units,\
disc_integrated=disc_integrated)
function.__name__ = 'wien'
return Function(function=function, par_names=pnames)
# def kepler_orbit(type='single'):
# """
# Kepler orbits ((p,t0,e,omega,K,v0) or (p,t0,e,omega,K1,v01,K2,v02))
#
# A single kepler orbit
# parameters are: [p, t0, e, omega, k, v0]
#
# A double kepler orbit
# parameters are: [p, t0, e, omega, k_1, v0_1, k_2, v0_2]
# Warning: This function uses 2d input and output!
# """
# if type == 'single':
# pnames = ['p','t0','e','omega','k','v0']
# function = lambda p, x: kepler.radial_velocity(p, times=x, itermax=8)
# function.__name__ = 'kepler_orbit_single'
#
# return Function(function=function, par_names=pnames)
#
# elif type == 'double':
# pnames = ['p','t0','e','omega','k1','v01','k2','v02' ]
# function = lambda p, x: [kepler.radial_velocity([p[0],p[1],p[2],p[3],p[4],p[5]], times=x[0], itermax=8),
# kepler.radial_velocity([p[0],p[1],p[2],p[3],p[6],p[7]], times=x[1], itermax=8)]
# def residuals(syn, data, weights=None, errors=None, **kwargs):
# return np.hstack( [( data[0] - syn[0] ) * weights[0], ( data[1] - syn[1] ) * weights[1] ] )
# function.__name__ = 'kepler_orbit_double'
#
# return Function(function=function, par_names=pnames, resfunc=residuals)
def box_transit(t0=0.):
"""
Box transit model (cont,freq,ingress,egress,depth)
@param t0: reference time (defaults to 0)
@type t0: float
"""
pnames = 'cont','freq','ingress','egress','depth'
def function(p,x):
cont,freq,ingress,egress,depth = p
model = np.ones(len(x))*cont
phase = np.fmod((x - t0) * freq, 1.0)
phase = np.where(phase<0,phase+1,phase)
transit_place = (ingress<=phase) & (phase<=egress)
model = np.where(transit_place,model-depth,model)
return model
function.__name__ = 'box_transit'
return Function(function=function, par_names=pnames)
def polynomial(d=1):
"""
Polynomial (a1,a0).
y(x) = ai*x**i + a(i-1)*x**(i-1) + ... + a1*x + a0
@param d: degree of the polynomial
@type d: int
"""
pnames = ['a{:d}'.format(i) for i in range(d,0,-1)]+['a0']
function = lambda p, x: np.polyval(p,x)
function.__name__ = 'polynomial'
return Function(function=function, par_names=pnames)
def soft_parabola():
"""
Soft parabola (ta,vlsr,vinf,gamma).
See Olofsson 1993ApJS...87...267O.
T_A(x) = T_A(0) * [ 1 - ((x- v_lsr)/v_inf)**2 ] ** (gamma/2)
"""
pnames = ['ta','vlsr','vinf','gamma']
def function(p,x):
term = (x-p[1]) / p[2]
y = p[0] * (1- term**2)**(p[3]/2.)
if p[3]<=0: y[np.abs(term)>=1] = 0
y[np.isnan(y)] = 0
return y
function.__name__ = 'soft_parabola'
return Function(function=function, par_names=pnames)
def gauss(use_jacobian=True):
"""
Gaussian (a,mu,sigma,c)
f(x) = a * exp( - (x - mu)**2 / (2 * sigma**2) ) + c
"""
pnames = ['a', 'mu', 'sigma', 'c']
function = lambda p, x: p[0] * np.exp( -(x-p[1])**2 / (2.0*p[2]**2)) + p[3]
function.__name__ = 'gauss'
if not use_jacobian:
return Function(function=function, par_names=pnames)
else:
def jacobian(p, x):
ex = np.exp( -(x-p[1])**2 / (2.0*p[2]**2) )
return np.array([-ex, -p[0] * (x-p[1]) * ex / p[2]**2, -p[0] * (x-p[1])**2 * ex / p[2]**3, [-1 for i in x] ]).T
return Function(function=function, par_names=pnames, jacobian=jacobian)
def sine():
"""
Sine (ampl,freq,phase,const)
f(x) = ampl * sin(2pi*freq*x + 2pi*phase) + const
"""
pnames = ['ampl', 'freq', 'phase', 'const']
function = lambda p, x: p[0] * sin(2*pi*(p[1]*x + p[2])) + p[3]
function.__name__ = 'sine'
return Function(function=function, par_names=pnames)
def sine_linfreqshift(t0=0.):
"""
Sine with linear frequency shift (ampl,freq,phase,const,D).
Similar to C{sine}, but with extra parameter 'D', which is the linear
frequency shift parameter.
@param t0: reference time (defaults to 0)
@type t0: float
"""
pnames = ['ampl', 'freq', 'phase', 'const','D']
def function(p,x):
freq = (p[1] + p[4]/2.*(x-t0))*(x-t0)
return p[0] * sin(2*pi*(freq + p[2])) + p[3]
function.__name__ = 'sine_linfreqshift'
return Function(function=function, par_names=pnames)
def sine_expfreqshift(t0=0.):
"""
Sine with exponential frequency shift (ampl,freq,phase,const,K).
Similar to C{sine}, but with extra parameter 'K', which is the exponential
frequency shift parameter.
frequency(x) = freq / log(K) * (K**(x-t0)-1)
f(x) = ampl * sin( 2*pi * (frequency + phase))
@param t0: reference time (defaults to 0)
@type t0: float
"""
pnames = ['ampl', 'freq', 'phase', 'const','K']
def function(p,x):
freq = p[1] / np.log(p[4]) * (p[4]**(x-t0)-1.)
return p[0] * sin(2*pi*(freq + p[2])) + p[3]
function.__name__ = 'sine_expfreqshift'
return Function(function=function, par_names=pnames)
def sine_orbit(t0=0.,nmax=10):
"""
Sine with a sinusoidal frequency shift (ampl,freq,phase,const,forb,asini,omega,(,ecc))
Similar to C{sine}, but with extra parameter 'asini' and 'forb', which are
the orbital parameters. forb in cycles/day or something similar, asini in au.
For eccentric orbits, add longitude of periastron 'omega' (radians) and
'ecc' (eccentricity).
@param t0: reference time (defaults to 0)
@type t0: float
@param nmax: number of terms to include in series for eccentric orbit
@type nmax: int
"""
pnames = ['ampl', 'freq', 'phase', 'const','forb','asini','omega']
def ane(n,e): return 2.*sqrt(1-e**2)/e/n*jn(n,n*e)
def bne(n,e): return 1./n*(jn(n-1,n*e)-jn(n+1,n*e))
def function(p,x):
ampl,freq,phase,const,forb,asini,omega = p[:7]
ecc = None
if len(p)==8:
ecc = p[7]
cc = 173.144632674 # speed of light in AU/d
alpha = freq*asini/cc
if ecc is None:
frequency = freq*(x-t0) + alpha*(sin(2*pi*forb*x) - sin(2*pi*forb*t0))
else:
ns = np.arange(1,nmax+1,1)
ans,bns = np.array([[ane(n,ecc),bne(n,ecc)] for n in ns]).T
ksins = sqrt(ans**2*cos(omega)**2+bns**2*sin(omega)**2)
thns = arctan(bns/ans*tan(omega))
tau = -np.sum(bns*sin(omega))
frequency = freq*(x-t0) + \
alpha*(np.sum(np.array([ksins[i]*sin(2*pi*ns[i]*forb*(x-t0)+thns[i]) for i in range(nmax)]),axis=0)+tau)
return ampl * sin(2*pi*(frequency + phase)) + const
function.__name__ = 'sine_orbit'
return Function(function=function, par_names=pnames)
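# Hedged usage sketch (not from the original docstrings): the eccentric branch of
# sine_orbit takes an eighth parameter, the eccentricity, so an evaluate() call
# uses [ampl, freq, phase, const, forb, asini, omega, ecc].
def _sine_orbit_eccentric_example():
    x = np.linspace(0, 10, 1000)
    # circular orbit (7 parameters) versus a mildly eccentric one (8 parameters)
    y_circular = evaluate('sine_orbit', x, [1., 2., 0, 0, 0.1, 10., 0.1])
    y_eccentric = evaluate('sine_orbit', x, [1., 2., 0, 0, 0.1, 10., 0.1, 0.3])
    return y_circular, y_eccentric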
#def generic(func_name):
##func = model.FUNCTION(function=getattr(evaluate,func_name), par_names)
#raise NotImplementedError
def power_law():
"""
Power law (A,B,C,f0,const)
    P(f) = A / (1 + (B*(f-f0))**C) + const
"""
pnames = ['ampl','b','c','f0','const']
function = lambda p, x: p[0] / (1 + (p[1]*(x-p[3]))**p[2]) + p[4]
function.__name__ = 'power_law'
return Function(function=function, par_names=pnames)
def lorentz():
"""
Lorentz profile (ampl,mu,gamma,const)
P(f) = A / ( (x-mu)**2 + gamma**2) + const
"""
pnames = ['ampl','mu','gamma','const']
function = lambda p,x: p[0] / ((x-p[1])**2 + p[2]**2) + p[3]
function.__name__ = 'lorentz'
return Function(function=function, par_names=pnames)
def voigt():
"""
Voigt profile (ampl,mu,sigma,gamma,const)
    z = (x - mu + gamma*i) / (sigma*sqrt(2))
    V = A * Real[cerf(z)] / (sigma*sqrt(2*pi)) + const
"""
pnames = ['ampl','mu','sigma','gamma','const']
def function(p,x):
x = x-p[1]
z = (x+1j*p[3])/(p[2]*sqrt(2))
return p[0]*_complex_error_function(z).real/(p[2]*sqrt(2*pi))+p[4]
function.__name__ = 'voigt'
return Function(function=function, par_names=pnames)
#}
#{ Combination functions
def multi_sine(n=10):
"""
Multiple sines.
@param n: number of sines
@type n: int
"""
return Model(functions=[sine() for i in range(n)])
def multi_blackbody(n=3,**kwargs):
"""
Multiple black bodies.
@param n: number of blackbodies
@type n: int
"""
return Model(functions=[blackbody(**kwargs) for i in range(n)])
#}
#{ Internal Helper functions
def _complex_error_function(x):
"""
Complex error function
"""
cef_value = np.exp(-x**2)*(1-erf(-1j*x))
if sum(np.isnan(cef_value))>0:
logging.warning("Complex Error function: NAN encountered, results are biased")
noisnans = np.compress(1-np.isnan(cef_value),cef_value)
try:
last_value = noisnans[-1]
except:
last_value = 0
logging.warning("Complex Error function: all values are NAN, results are wrong")
cef_value = np.where(np.isnan(cef_value),last_value,cef_value)
return cef_value
#}
def evaluate(funcname, domain, parameters, **kwargs):
"""
Evaluate a function on specified interval with specified parameters.
Extra keywords are passed to the funcname.
Example:
>>> x = np.linspace(-5,5,1000)
>>> y = evaluate('gauss',x,[1.,0.,2.,0.])
    @param funcname: name of the function
    @type funcname: str
    @param domain: domain to evaluate onto
    @type domain: array
    @param parameters: parameters of the function
    @type parameters: array
"""
function = globals()[funcname](**kwargs)
function.setup_parameters(parameters)
return function.evaluate(domain)
if __name__=="__main__":
import doctest
from matplotlib import pyplot as plt
doctest.testmod()
    plt.show()
 | gpl-3.0 |
JaviMerino/workload-automation | wlauto/instrumentation/energy_probe/__init__.py | 4 | 6833 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init
import os
import subprocess
import signal
import struct
import csv
try:
import pandas
except ImportError:
pandas = None
from wlauto import Instrument, Parameter, Executable
from wlauto.exceptions import InstrumentError, ConfigError
from wlauto.utils.types import list_of_numbers
class EnergyProbe(Instrument):
name = 'energy_probe'
description = """Collects power traces using the ARM energy probe.
This instrument requires ``caiman`` utility to be installed in the workload automation
host and be in the PATH. Caiman is part of DS-5 and should be in ``/path/to/DS-5/bin/`` .
Energy probe can simultaneously collect energy from up to 3 power rails.
To connect the energy probe on a rail, connect the white wire to the pin that is closer to the
Voltage source and the black wire to the pin that is closer to the load (the SoC or the device
you are probing). Between the pins there should be a shunt resistor of known resistance in the
range of 5 to 20 mOhm. The resistance of the shunt resistors is a mandatory parameter
``resistor_values``.
.. note:: This instrument can process results a lot faster if python pandas is installed.
"""
parameters = [
Parameter('resistor_values', kind=list_of_numbers, default=[],
description="""The value of shunt resistors. This is a mandatory parameter."""),
Parameter('labels', kind=list, default=[],
description="""Meaningful labels for each of the monitored rails."""),
Parameter('device_entry', kind=str, default='/dev/ttyACM0',
description="""Path to /dev entry for the energy probe (it should be /dev/ttyACMx)"""),
]
MAX_CHANNELS = 3
def __init__(self, device, **kwargs):
super(EnergyProbe, self).__init__(device, **kwargs)
self.attributes_per_sample = 3
self.bytes_per_sample = self.attributes_per_sample * 4
self.attributes = ['power', 'voltage', 'current']
for i, val in enumerate(self.resistor_values):
self.resistor_values[i] = int(1000 * float(val))
def validate(self):
if subprocess.call('which caiman', stdout=subprocess.PIPE, shell=True):
raise InstrumentError('caiman not in PATH. Cannot enable energy probe')
if not self.resistor_values:
raise ConfigError('At least one resistor value must be specified')
if len(self.resistor_values) > self.MAX_CHANNELS:
            raise ConfigError('{} channels were specified but the Energy Probe supports up to {}'
.format(len(self.resistor_values), self.MAX_CHANNELS))
if pandas is None:
self.logger.warning("pandas package will significantly speed up this instrument")
self.logger.warning("to install it try: pip install pandas")
def setup(self, context):
if not self.labels:
self.labels = ["PORT_{}".format(channel) for channel, _ in enumerate(self.resistor_values)]
self.output_directory = os.path.join(context.output_directory, 'energy_probe')
rstring = ""
for i, rval in enumerate(self.resistor_values):
rstring += '-r {}:{} '.format(i, rval)
self.command = 'caiman -d {} -l {} {}'.format(self.device_entry, rstring, self.output_directory)
os.makedirs(self.output_directory)
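    # For reference (illustrative values, not from the original source): with
    # device_entry '/dev/ttyACM0' and resistor_values [20, 10] -- each scaled by
    # 1000 in __init__ above -- the command assembled in setup() takes the form
    #   caiman -d /dev/ttyACM0 -l -r 0:20000 -r 1:10000  <output_directory>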
def start(self, context):
self.logger.debug(self.command)
self.caiman = subprocess.Popen(self.command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=os.setpgrp,
shell=True)
def stop(self, context):
os.killpg(self.caiman.pid, signal.SIGTERM)
def update_result(self, context): # pylint: disable=too-many-locals
num_of_channels = len(self.resistor_values)
processed_data = [[] for _ in xrange(num_of_channels)]
filenames = [os.path.join(self.output_directory, '{}.csv'.format(label)) for label in self.labels]
struct_format = '{}I'.format(num_of_channels * self.attributes_per_sample)
not_a_full_row_seen = False
with open(os.path.join(self.output_directory, "0000000000"), "rb") as bfile:
while True:
data = bfile.read(num_of_channels * self.bytes_per_sample)
if data == '':
break
try:
unpacked_data = struct.unpack(struct_format, data)
except struct.error:
if not_a_full_row_seen:
                        self.logger.warn('possibly misaligned caiman raw data, row contained {} bytes'.format(len(data)))
continue
else:
not_a_full_row_seen = True
for i in xrange(num_of_channels):
index = i * self.attributes_per_sample
processed_data[i].append({attr: val for attr, val in
zip(self.attributes, unpacked_data[index:index + self.attributes_per_sample])})
for i, path in enumerate(filenames):
with open(path, 'w') as f:
if pandas is not None:
self._pandas_produce_csv(processed_data[i], f)
else:
self._slow_produce_csv(processed_data[i], f)
# pylint: disable=R0201
def _pandas_produce_csv(self, data, f):
dframe = pandas.DataFrame(data)
dframe = dframe / 1000.0
dframe.to_csv(f)
def _slow_produce_csv(self, data, f):
new_data = []
for entry in data:
new_data.append({key: val / 1000.0 for key, val in entry.items()})
writer = csv.DictWriter(f, self.attributes)
writer.writeheader()
writer.writerows(new_data)
| apache-2.0 |
astrofrog/numpy | doc/example.py | 8 | 3484 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi') :
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
describe : type
Explanation
output : type
Explanation
tuple : type
Explanation
items : type
even more explaining
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
pass
| bsd-3-clause |
YinongLong/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly enforcing the selected features to be the same across
tasks. This example simulates sequential measurements, each task
is a time instant, and the relevant features vary in amplitude
over time while being the same. The multi-task lasso imposes that
features that are selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
Convertro/Hydro | src/hydro/types.py | 1 | 1848 | from hydro.exceptions import HydroException
from datetime import datetime
from pandas import DataFrame
__author__ = 'moshebasanchig'
class HydroType(object):
"""
Hydro Type is an abstract class for types that need to be parsed and injected into the queries as filters
"""
def __init__(self, value, **kwargs):
self._value = self.parse(value, **kwargs)
if self._value is None:
raise HydroException("Value is not set")
def to_string(self):
return str(self._value)
    def parse(self, value, **kwargs):
raise HydroException("Not implemented")
def __sub__(self, other):
interval = self.value - other.value
return interval
@property
def value(self):
return self._value
class HydroStr(HydroType):
def parse(self, value, **kwargs):
return str(value)
class HydroDatetime(HydroType):
format = "%Y-%m-%d %H:%M:%S"
def parse(self, value, **kwargs):
if 'format' in kwargs:
self.format = kwargs['format']
dt = value.split(' ')
if len(dt) == 1:
dt.append('00:00:00')
return datetime.strptime(' '.join(dt), self.format)
def to_string(self):
return datetime.strftime(self._value, self.format)
class HydroList(HydroType):
def parse(self, value, **kwargs):
if isinstance(value, list):
return value
else:
raise HydroException("Expected a list")
def to_string(self):
return ', '.join("'{0}'".format(val) for val in self._value)
class HydroDataframe(HydroType):
def parse(self, value, **kwargs):
if isinstance(value, DataFrame):
return value
else:
raise HydroException("Expected a Data Frame")
def to_string(self):
return ', '.join(self._value.columns)
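# Hedged usage sketch (not part of the original module): how the concrete
# HydroType subclasses above parse raw values and render them as filter strings.
def _hydro_types_example():
    # a date-only string gets a midnight timestamp appended by HydroDatetime.parse
    start = HydroDatetime('2014-01-01')
    # HydroList renders as a quoted, comma-separated string suitable for filters
    campaigns = HydroList(['a', 'b'])
    # expected: ('2014-01-01 00:00:00', "'a', 'b'")
    return start.to_string(), campaigns.to_string()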
| mit |
djgagne/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
anurag313/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
wolfiex/VisACC | revamp/gmmcluster.py | 1 | 1524 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from mpl_toolkits.mplot3d import Axes3D
from sklearn import mixture
import pandas as pd
def q(x, y):
g1 = mlab.bivariate_normal(x, y, 1.0, 1.0, -1, -1, -0.8)
g2 = mlab.bivariate_normal(x, y, 1.5, 0.8, 1, 2, 0.6)
return 0.6*g1+28.4*g2/(0.6+28.4)
def plot_q():
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(-5, 5, 0.1)
Y = np.arange(-5, 5, 0.1)
X, Y = np.meshgrid(X, Y)
Z = q(X, Y)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.get_cmap('coolwarm'),
linewidth=0, antialiased=True)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.savefig('3dgauss.png')
plt.clf()
def sample():
    df = pd.read_csv('global.csv')
var = 'O3'
    samples = np.array(df[['%s_x' % var, '%s_y' % var]])
plt.scatter(samples[:, 0], samples[:, 1], alpha=0.5, s=1)
'''Plot target'''
dx = 0.01
x = np.arange(np.min(samples), np.max(samples), dx)
y = np.arange(np.min(samples), np.max(samples), dx)
X, Y = np.meshgrid(x, y)
Z = q(X, Y)
CS = plt.contour(X, Y, Z, 10, alpha=0.5)
plt.clabel(CS, inline=1, fontsize=10)
plt.savefig("samples.png")
return samples
def fit_samples(samples):
gmix = mixture.GMM(n_components=2, covariance_type='full')
gmix.fit(samples)
print gmix.means_
colors = ['r' if i==0 else 'g' for i in gmix.predict(samples)]
ax = plt.gca()
ax.scatter(samples[:,0], samples[:,1], c=colors, alpha=0.8)
plt.savefig("class.png")
if __name__ == '__main__':
plot_q()
s = sample()
fit_samples(s) | mit |
OxfordSKA/bda | pybda/corrupting_gains.py | 1 | 7934 | # -*- coding: utf-8 -*-
"""Module to evaluate corrupting gains for BDA simulations."""
from __future__ import (print_function, absolute_import)
import numpy
import time
import os
def allan_deviation(data, dt, tau):
"""
Evaluate the Allan deviation of a time series.
References:
https://en.wikipedia.org/wiki/Allan_variance
Args:
data (array_like): Array of time series data.
dt (float): Sample spacing of the time series data.
tau (float): Interval at which to calculate the allan deviation.
Returns:
sm: Allan deviation
sme: error on the allan deviation
n: number of pairs in the Allan computation
"""
data = numpy.asarray(data)
num_points = data.shape[0]
m = int(tau / dt) # Number of samples in length tau
data = data[:num_points - (num_points % m)] # Resize to a multiple of m
# Reshape into blocks of length m and take the average of each block.
data = data.reshape((-1, m))
data_mean = numpy.mean(data, axis=1)
data_diff = numpy.diff(data_mean)
n = data_diff.shape[0]
a_dev = numpy.sqrt((0.5 / n) * (numpy.sum(data_diff**2)))
a_dev_err = a_dev / numpy.sqrt(n)
return a_dev, a_dev_err, n
def smooth(x, window_len=11, window='hanning'):
x = numpy.asarray(x)
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is on of 'flat', 'hanning', 'hamming', "
"'bartlett', 'blackman'")
s = numpy.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
if window == 'flat': # moving average
w = numpy.ones(window_len, 'd')
else:
w = eval('numpy.' + window + '(window_len)')
y = numpy.convolve(w / w.sum(), s, mode='valid')
return y[:x.shape[0]]
def fractional_brownian_motion(n, hurst):
"""Generate Fractional brownian motion noise.
http://www.maths.uq.edu.au/~kroese/ps/MCSpatial.pdf
Args:
n (int): Length of the time series.
hurst (float): Hurst parameter.
Returns:
Time series array.
"""
a = 2.0 * hurst
r = numpy.empty(n + 1) * numpy.NaN
r[0] = 1.0
k = numpy.arange(1, n + 1)
r[1:] = 0.5 * ((k + 1)**a - 2.0 * k**a + (k - 1)**a)
r = numpy.append(r, r[-2:0:-1]) # first row of circulant matrix
l = numpy.real(numpy.fft.fft(r)) / (2 * n)
w = numpy.fft.fft(numpy.sqrt(l) * (numpy.random.randn(2 * n) +
1.0j * numpy.random.randn(2 * n)))
w = n**(-hurst) * numpy.cumsum(numpy.real(w[:n+1])) # Rescale
return w[:n]
def eval_complex_gains(n, dt, hurst_amp, adev_amp, std_t_mid_amp,
hurst_phase, adev_phase, std_t_mid_phase,
smoothing_length=0, tau=1.0):
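    # Amplitude and phase are independent fractional-Brownian-motion series,
    # optionally smoothed, rescaled so their Allan deviation at `tau` matches
    # adev_amp / adev_phase, and then pinned at the mid-sample to 1.0
    # (amplitude) and 0 degrees (phase) plus Gaussian scatter of width
    # std_t_mid_amp / std_t_mid_phase.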
amp = fractional_brownian_motion(n, hurst_amp)
if smoothing_length > 0:
amp = smooth(amp, smoothing_length)
amp *= adev_amp / allan_deviation(amp, dt, tau)[0]
amp += 1.0 - amp[n / 2] + (numpy.random.randn() * std_t_mid_amp)
phase = fractional_brownian_motion(n, hurst_phase)
if smoothing_length > 0:
phase = smooth(phase, smoothing_length)
phase *= adev_phase / allan_deviation(phase, dt, tau)[0]
phase += -phase[n / 2] + (numpy.random.randn() * std_t_mid_phase)
gain = amp * numpy.exp(1.0j * numpy.radians(phase))
return gain
def allan_dev_spectrum(data, dt):
data = numpy.asarray(data)
tau_values = numpy.arange(2, data.shape[0] / 3, 5) * dt
adev = numpy.empty_like(tau_values)
adev_err = numpy.empty_like(tau_values)
for i, tau in enumerate(tau_values):
adev[i] = allan_deviation(data, dt, tau)[0]
adev_err[i] = allan_deviation(data, dt, tau)[0]
return adev, tau_values, adev_err
def test_unblocked():
import matplotlib.pyplot as pyplot
tau = 1.0
num_steps = 6
num_antennas = 50
hurst_amp = 0.55
adev_amp = numpy.linspace(1.e-5, 1.e-3, num_steps)
std_t_mid_amp = 0.05
hurst_phase = 0.55
adev_phase = numpy.linspace(0.01, 0.2, num_steps)
std_t_mid_phase = 5.0
num_times = 5000
dump_time = 0.1
over_sample = 10
smoothing_length = over_sample * 3
n = num_times * over_sample
dt = dump_time / float(over_sample)
times = numpy.arange(n) * dt
print('No. samples = %i' % n)
print('No. steps = %i' % num_steps)
gains = numpy.empty((num_steps, num_antennas, n), dtype='c16')
t0 = time.time()
for i in range(num_steps):
print('%i %e' % (i, adev_amp[i]))
for a in range(num_antennas):
gains[i, a, :] = eval_complex_gains(n, dt, hurst_amp, adev_amp[i],
std_t_mid_amp,
hurst_phase, adev_phase[i],
std_t_mid_phase,
smoothing_length,
tau)
print('Time taken to generate gains = %.3f s' % (time.time() - t0))
fig = pyplot.figure(figsize=(7, 7))
fig.subplots_adjust(left=0.15, bottom=0.1, right=0.95, top=0.95,
hspace=0.2, wspace=0.0)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
colors = ['r', 'g', 'b', 'm', 'y', 'k']
print('plotting ...')
y_max = 0.0
for i in range(num_steps):
for a in range(num_antennas):
if i == num_steps - 1:
ax1.plot(times, numpy.abs(gains[i, a, :]), '-', color=colors[i])
s, t, _ = allan_dev_spectrum(numpy.abs(gains[i, a, :]), dt)
ax2.semilogy(t, s, color=colors[i])
ax2.set_xlim(0, tau * 3.0)
y_max = max(y_max, s[numpy.argmax(t > tau * 3.0)])
ax2.set_ylim(0, y_max * 1.05)
y_max = numpy.max(gains[num_steps-1, :, :])
print(numpy.abs(y_max))
ax1.grid()
ax1.set_xlabel('Observation length [seconds]', fontsize='small')
ax1.set_ylabel('Gain amplitude', fontsize='small')
ax2.grid()
ax2.set_ylabel('Allan deviation', fontsize='small')
ax2.set_xlabel('Sample period, tau [seconds]', fontsize='small')
if os.path.isfile('gain_amp.png'):
os.remove('gain_amp.png')
pyplot.savefig('gain_amp.png')
# =========================================================================
fig = pyplot.figure(figsize=(7, 7))
fig.subplots_adjust(left=0.15, bottom=0.1, right=0.95, top=0.95,
hspace=0.2, wspace=0.0)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
colors = ['r', 'g', 'b', 'm', 'y', 'k']
print('plotting ...')
y_max = 0.0
for i in range(num_steps):
for a in range(num_antennas):
if i == num_steps - 1:
ax1.plot(times, numpy.degrees(numpy.angle(gains[i, a, :])),
'-', color=colors[i])
s, t, _ = allan_dev_spectrum(
numpy.degrees(numpy.angle(gains[i, a, :])), dt)
ax2.semilogy(t, s, color=colors[i])
ax2.set_xlim(0, tau * 3.0)
y_max = max(y_max, s[numpy.argmax(t > tau * 3.0)])
ax2.set_ylim(0, y_max * 1.05)
y_max = numpy.max(gains[num_steps-1, :, :])
print(numpy.abs(y_max))
ax1.grid()
ax1.set_xlabel('Observation length [seconds]', fontsize='small')
ax1.set_ylabel('Gain phase', fontsize='small')
ax2.grid()
ax2.set_ylabel('Allan deviation', fontsize='small')
ax2.set_xlabel('Sample period, tau [seconds]', fontsize='small')
if os.path.isfile('gain_phase.png'):
os.remove('gain_phase.png')
pyplot.savefig('gain_phase.png')
if __name__ == '__main__':
test_unblocked()
| bsd-3-clause |
lcharleux/abapy | doc/example_code/materials/Hollomon.py | 1 | 1324 | from abapy.materials import Hollomon
import matplotlib.pyplot as plt
E = 1. # Young's modulus
sy = 0.001 # Yield stress
n = 0.15 # Hardening exponent
nu = 0.3
eps_max = .1 # maximum strain to be computed
N = 30 # Number of points to be computed (30 is a low value useful for graphical reasons, in real simulations, 100 is a better value).
mat1 = Hollomon(labels = 'my_material', E=E, nu=nu, sy=sy, n=n)
table1 = mat1.get_table(0, N=N, eps_max=eps_max)
eps1 = table1[:,0]
sigma1 = table1[:,1]
sigma_max1 = max(sigma1)
mat2 = Hollomon(labels = 'my_material', E=E, nu=nu, sy=sy, n=n, kind = 2)
table2 = mat2.get_table(0, N=N, eps_max=eps_max)
eps2 = table2[:,0]
sigma2 = table2[:,1]
sigma_max2 = max(sigma2)
plt.figure()
plt.clf()
plt.title('Hollomon tensile behavior: $n = {0:.2f}$, $\sigma_y / E = {1:.2e}$'.format(n, sy/E))
plt.xlabel('Strain $\epsilon$')
plt.ylabel('Stress $\sigma$')
plt.plot(eps1, sigma1, 'or-', label = 'Plasticity kind=1')
plt.plot(eps2, sigma2, 'vg-', label = 'Plasticity kind=2')
plt.plot([0., sy / E], [0., sy], 'b-', label = 'Elasticity')
plt.xticks([0., sy/E, eps_max], ['$0$', '$\epsilon_y$', '$\epsilon_{max}$'], fontsize = 16.)
plt.yticks([0., sy, sigma_max1], ['$0$', '$\sigma_y$', '$\sigma_{max}$'], fontsize = 16.)
plt.grid()
plt.legend(loc = "lower right")
plt.show()
| gpl-2.0 |
AIML/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 142 | 6276 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that much iters
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
nesterione/problem-solving-and-algorithms | problems/Calculus/mfd_other_way_sm.py | 1 | 1195 | import numpy as np
lam = 401
c = 385
ro = 8900
alpha = lam/(c*ro)
dx = 0.01
dt = 0.5
stick_length = 1
# time in seconds
time_span = 10
Tinit = 300
Tright = 350
Tleft = 320
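# Implicit (backward-Euler) finite-difference scheme for the 1D heat equation
# dT/dt = alpha * d2T/dx2, written node by node as
#   (dx^2 + 2*alpha*dt)*T[p+1,m] - alpha*dt*(T[p+1,m-1] + T[p+1,m+1]) = dx^2 * T[p,m]
# The coefficients u1..u4 below are the factors multiplying each unknown.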
# u p+1 m
u1 = dx*dx + 2*dt*alpha
# u p m
u2 = -(dx*dx)
# u p+1 m-1
u3 = -alpha*dt
# u p+1 m+1
u4 = -alpha*dt
dim = int(stick_length / dx) + 1 - 2  # the two boundary points are not included here; they are set manually below
A = np.diag(np.ones(dim))
B = np.zeros([dim])
# Fill begin values for vector B
import matplotlib.pyplot as plt
x_axis_step = np.arange(0, stick_length+dx/2.0, dx)
time_cnt = int(time_span/dt)
T0 = np.zeros([dim])
T0[0:dim]=Tinit
#for tt in range(0,time_cnt):
for tt in range(0,time_cnt):
for i in range(len(B)):
B[i]=-T0[i]*u2
B[0] = Tleft
B[dim-1] = Tright
A[0,0] = 1
A[dim-1,dim-1] = 1
for node in range(1, dim-1):
A[node, node-1] = u3
A[node, node+1] = u4
A[node, node] = u1
#B[node] = u2*
#B[node]-= (c+k+p)
#print(B)
#print("******",A,"******")
X = np.linalg.solve(A, B)
print(X)
plt.plot(x_axis_step, X)
T0 = X
plt.show() | apache-2.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Coupled_Contact/Steady_State_Single_Foundation_Sysytem_Under_Compression/CoupledHardContact_NonLinHardSoftShear/n_1/Plot_Results.py | 15 | 3553 | #!/usr/bin/env python
#!/usr/bin/python
import h5py
from matplotlib import pylab
import matplotlib.pylab as plt
import sys
from matplotlib.font_manager import FontProperties
import math
import numpy as np
#!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 30})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=28
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=28
# Plot the figure. Add labels and titles.
plt.figure()
ax = plt.subplot(111)
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Stress [Pa] ")
# Pore Pressure
# #########################################################################
thefile = "Soil_Foundation_System_Surface_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
upU_p = finput["/Model/Nodes/Generalized_Displacements"][3,:]
upU_u = finput["/Model/Nodes/Generalized_Displacements"][2,:]
upU_U = finput["/Model/Nodes/Generalized_Displacements"][6,:]
u_u = finput["/Model/Nodes/Generalized_Displacements"][79,:]
sigma_zz_ = finput["/Model/Elements/Gauss_Outputs"][14,:]
# pore_pressure
ax.plot(times,upU_p,'b',linewidth=2,label=r'Pore Pressure $p$');
ax.hold(True);
# Total Stress
# #########################################################################
# Read the time and displacement
times = finput["time"][:];
T = times[len(times)-1]
sigma_zz = 400/T*times
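# total vertical stress: assumed here to ramp linearly from 0 to 400 Pa over the record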
# total stress curve
ax.plot(times,sigma_zz,'k',linewidth=2,label=r'Total Stress $\sigma$');
ax.hold(True);
# Effective Stress
# #########################################################################
# Read the time and displacement
times = finput["time"][:];
sigma_zz_ = sigma_zz - upU_p
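# Terzaghi's effective stress principle: sigma' = sigma - p (total stress minus pore pressure)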
# effective stress curve
ax.plot(times,sigma_zz_,'r',linewidth=2,label=r'''Effective Stress $\sigma^{\prime}$''');
ax.hold(True);
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
max_xticks = 5
yloc = plt.MaxNLocator(max_xticks)
ax.xaxis.set_major_locator(yloc)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.35),
ncol=2, fancybox=True, shadow=True, prop={'size': 24})
pylab.savefig("Coupled_Soft_Contact_Steady_State_SF_Ststem_Under_Compression_Porosity_Effective_Stress_Principle.pdf", bbox_inches='tight')
# plt.show()
#
################################### Drainage Condition Verification #############################
ax.hold(False);
fig = plt.figure();
ax = plt.subplot(111)
ax.plot(times,upU_u*1e8,'k',linewidth=3,label=r'$upU\_u$'); ax.hold(True);
ax.plot(times,upU_U*1e8,'b',linewidth=10,label=r'$upU\_U$'); ax.hold(True);
ax.plot(times,u_u*1e8,'r',linewidth=3,label=r'$u\_u$'); ax.hold(True);
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Displacement $\times 1e^{-8}$ [m] ")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.25),
ncol=4, fancybox=True, shadow=True, prop={'size': 24})
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
max_xticks = 5
yloc = plt.MaxNLocator(max_xticks)
ax.xaxis.set_major_locator(yloc)
pylab.savefig("Coupled_Soft_Contact_Steady_State_SF_Ststem_Under_Compression_Porosity_Undrained_Conditions.pdf", bbox_inches='tight')
# plt.show()
| cc0-1.0 |
dereknewman/cancer_detection | combine_annotations_to_csv.py | 1 | 5177 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 9 14:35:08 2017
@author: derek
"""
import os
import pandas as pd
import numpy as np
import glob
import ntpath
import SimpleITK
import helpers
BASE_DIR = "/media/derek/disk1/kaggle_ndsb2017/"
BASE_DIR_SSD = "/media/derek/disk1/kaggle_ndsb2017/"
LUNA16_EXTRACTED_IMAGE_DIR = BASE_DIR_SSD + "luna16_extracted_images/"
TARGET_VOXEL_MM = 0.682
LUNA_SUBSET_START_INDEX = 0
LUNA16_RAW_SRC_DIR = BASE_DIR + "luna_raw/"
def find_mhd_file(patient_id):
"""find the directory path containing the '.mhd' file associated with a specific patient_id. file must be
in the form of patient_id.mhd (i.e. 1.2.34.5678.mhd)
Args:
patient_id: (string) patient's id
Returns:
src_path: (string) path to the directory containing the patient_id file
"""
for subject_no in range(LUNA_SUBSET_START_INDEX, 10):
src_dir = LUNA16_RAW_SRC_DIR + "subset" + str(subject_no) + "/"
for src_path in glob.glob(src_dir + "*.mhd"):
if patient_id in src_path:
return src_path
return None
def normalize(image):
""" Normalize image -> clip data between -1000 and 400. Scale values to -0.5 to 0.5
"""
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image > 1] = 1.
image[image < 0] = 0.
image -= 0.5
return image
def fetch_image(src_path_file,TARGET_VOXEL_MM=1):
"""Load the '.mhd' file, extract the 3D numpy array, rescale the data,
and normalize.
Args:
src_path_file: (string) the complete path (dir + filename) to the
.mhd file to load
TARGET_VOXEL_MM: the size of each voxel (mm) after rescaling of the data
Returns:
img_array: (np array) 3D array of image data. After rescaling and normalizing
"""
patient_id = ntpath.basename(src_path_file).replace(".mhd", "") #extract patient id from filename
print("Patient: ", patient_id)
itk_img = SimpleITK.ReadImage(src_path_file)
img_array = SimpleITK.GetArrayFromImage(itk_img) #Extract actual array data
#origin = np.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm)
#direction = np.array(itk_img.GetDirection()) # x,y,z Origin in world coordinates (mm)
spacing = np.array(itk_img.GetSpacing()) # spacing of voxels in world coor. (mm)
#rescale = spacing / TARGET_VOXEL_MM
img_array = helpers.rescale_patient_images(img_array, spacing, TARGET_VOXEL_MM)
img_array = normalize(img_array)
return img_array
########################################################################
########## CREATE FULL DATAFRAME OF ALL PATIENTS AND NODULES ##########
########################################################################
src_dir = LUNA16_EXTRACTED_IMAGE_DIR + "_labels/"
# Verify that the directory exists
if not os.path.isdir(src_dir):
print(src_dir + " directory does not exist")
full_dataframe = pd.DataFrame(columns=["patient_id", "coord_x", "coord_y", "coord_z", "malscore"])
for file_ in os.listdir(src_dir):
# Verify that the file is a '.csv' file
if not file_[-4:] == '.csv':
continue
df = pd.read_csv(src_dir + file_)
patient_id = file_.split("_annos_")[0] #extrect the paitent id from filename
patient_column = np.repeat(patient_id,df.shape[0])
df_short = df[["coord_x", "coord_y", "coord_z", "malscore"]] #extract jus x,y,z and malscore
df_short = df_short.assign(patient_id = patient_column) #add patient_id
full_dataframe = full_dataframe.append(df_short, ignore_index=True) #append to full
#full_dataframe.round({"coord_x": 4, "coord_y": 4, "coord_z":4})
full_dataframe = full_dataframe.drop_duplicates() #drop duplicate rows
full_dataframe.to_csv(BASE_DIR + "patID_x_y_z_mal.csv", index=False)
#########################################################################
########################################################################
########## EXTRAS ##########
########################################################################
patients = full_dataframe.patient_id.unique()
for patient in patients[0:1]:
patient_path = find_mhd_file(patient) #locate the path to the '.mhd' file
image_array = fetch_image(patient_path, TARGET_VOXEL_MM)
image_shape = image_array.shape
print(patient)
patient_df = full_dataframe.loc[full_dataframe['patient_id'] == patient] #create a dateframe assoicated to a single patient
for index, row in patient_df.iterrows():
x = int(row["coord_x"]*image_shape[0])
y = int(row["coord_y"]*image_shape[1])
z = int(row["coord_z"]*image_shape[2])
print(x,y,z)
import matplotlib.pylab as plt
import matplotlib.patches as patches
fig1,ax1 = plt.imshow(image_array[z,:,:],cmap="gray")
rect = patches.Rectangle((x-32,y-32),x+32,y+32,linewidth=1,edgecolor='r',facecolor='none')
fig1.add_patch(rect)
plt.imshow(image_array[184,:,:],cmap="gray")
im = image_array[44,:,:]
image_ = np.stack([im, im, im],axis=2)
image_[x-32:x+32,y-32:y+32,0] = 30
plt.imshow(image_, cmap = "gray")
| mit |
tdsmith/pinscreen | pinscreen.py | 1 | 18394 | #!/usr/bin/env python
import argparse
from copy import deepcopy
import os
import sys
import scipy as sp
import matplotlib as mpl
if __name__ == "__main__": mpl.use('Agg') # we need to do this right away
import numpy as np
from numpy import pi, floor, copysign, sqrt, mean
import matplotlib.pyplot as plt
from scipy import optimize, stats
from scipy.signal import sawtooth
sign = lambda x: copysign(1, x)
class Dot:
"Simple class to hold an (x,y) pair."
def __init__(self, xpos, ypos, perim):
self.xpos = xpos
self.ypos = ypos
self.perim = perim
def __repr__(self):
return 'Dot(xpos=%f, ypos=%f, perim=%f)' % (self.xpos, self.ypos, self.perim)
class SineFit:
"Stores parameters for an arbitrary sine function."
def __init__(self, amplitude, period, phase, offset, r2=None, sin=np.sin):
self.amplitude = amplitude
self.period = period
self.phase = phase
self.offset = offset
self.r2 = r2
self.sin = sin
self.normalize()
def normalize(self):
# negative amplitude = pi phase change
if self.amplitude < 0:
self.amplitude *= -1
self.phase += pi
# restrict phase to -pi ... pi
if not (-pi < self.phase and self.phase <= pi):
self.phase -= floor((self.phase+pi)/(2*pi))*(2*pi)
def eval(self, t):
"SineFit.eval(self,t): Evaluate the sine function represented by self at a point or list of points t."
singleton = not getattr(t, '__iter__', False)
if singleton:
t = [t]
ret = [self.amplitude * self.sin(2*pi/self.period * ti + self.phase) + self.offset for ti in t]
return ret[0] if singleton else ret
def __repr__(self):
return ('SineFit(amplitude=%f, period=%f, phase=%f, offset=%f, r2=%r)' %
(self.amplitude, self.period, self.phase, self.offset, self.r2))
def parse_mtrack2(fileobj):
"""frames = parse_mtrack2(fileobj)
Reads in output from ImageJ plugin MTrack2. Converts it into a list of
lists of Dot objects:
frames[0] = [Dot0, Dot1,...].
Reorganizes the dots so that 0, 1, 2, ... sqrt(n) is the top row from L to
R, thence onwards."""
headers = fileobj.readline()[:-1].split('\t')
n = (len(headers)-1)/3
if abs(sqrt(n) - int(sqrt(n))) > 0.01:
raise "Number of dots does not describe a square."
# x_col[1] is the index in line for X1
x_col, y_col = [1 + i*3 for i in xrange(n)], [2 + i*3 for i in xrange(n)]
assignments = None
fileobj.readline() # discard the line "Tracks 1 to n"
frames = []
for line in fileobj.readlines():
line = line[:-1].split('\t')
if not assignments:
# MTrack2 does not guarantee that the dots will be enumerated in any particular order,
# so we have to figure out which dot in the file is our dot 1. We do this by sorting
# the dots in the file by both x and y. For an n x n matrix, if a dot has one of the
# n lowest x values, it must be in the first column; if it's not in the first n but
# is in the first 2n, it must be in the second column, and so on.
x, y = ([(i, float(line[col])) for i, col in enumerate(x_col)],
[(i, float(line[col])) for i, col in enumerate(y_col)])
x = sorted(x, cmp=lambda a, b: cmp(a[1], b[1]))
y = sorted(y, cmp=lambda a, b: cmp(a[1], b[1]))
xi, yi = [None]*n, [None]*n
for sort_i, (file_i, _value) in enumerate(x):
xi[file_i] = sort_i
for sort_i, (file_i, _value) in enumerate(y):
yi[file_i] = sort_i
assignments = [None] * n
for i in xrange(n):
row = int(floor(yi[i] / int(sqrt(n))))
col = int(floor(xi[i] / int(sqrt(n))))
assignments[i] = row*int(sqrt(n)) + col
frame = [None]*n
for i in xrange(n):
frame[assignments[i]] = Dot(float(line[x_col[i]]), float(line[y_col[i]]), 1)
frames.append(frame)
print [i for i in enumerate(assignments)]
return frames
def sinefit(frames, dt, sin=np.sin):
"""fit_parameters = sinefit(frames)
Takes the output of parse_csv and runs a sine-fitting function against it.
For frames with n dots, returns an n-element list of tuples of SineFit objects (x,y).
e.g., fit_parameters[0] = (SineFit(for dot0 in x), SineFit(for dot0 in y))
"""
# p = [amplitude, period, phase offset, y offset]
fitfunc = lambda p, x: p[0] * sin(2*pi/p[1]*x + p[2]) + p[3]
errfunc = lambda p, x, y: fitfunc(p, x) - y
p0 = [1., 1., 0., 0.]
t = np.arange(len(frames)) * dt
fit_parameters = []
for idot in xrange(len(frames[0])):
print 'Sine fitting: dot %d' % idot
dx, dy = zip(*[(frame[idot].xpos, frame[idot].ypos) for frame in frames])
p0[0] = (max(dx)-min(dx))/2.0
p0[3] = np.mean(dx)
# FIXME: "success" here is not a valid success measure
px, success = optimize.leastsq(errfunc, p0, args=(t, dx))
if not success:
raise "Problem with optimize for dot %d in x" % idot
xfit = SineFit(*px, sin=sin)
xfit.r2 = stats.mstats.pearsonr(dx, xfit.eval(t))[0] ** 2
p0[0] = (max(dy)-min(dy))/2.0
p0[3] = np.mean(dy)
py, success = optimize.leastsq(errfunc, p0, args=(t, dy))
if not success:
raise "Problem with optimize for dot %d in y" % idot
yfit = SineFit(*py, sin=sin)
yfit.r2 = stats.mstats.pearsonr(dy, yfit.eval(t))[0] ** 2
fit_parameters.append((xfit, yfit))
return fit_parameters
def process_coordinates(fit_parameters):
"""(center_x, center_y, resting_x, resting_y, extended_x, extended_y) = process_coordinates(fit_parameters)
finds the resting and extended position for each dot, using the sine fit parameters."""
# start by finding a coordinate system based on the center of the device.
# assume the outer dots make a perfect square and (0,0) is upper left.
X, Y = 0, 1
center_x = ((fit_parameters[-1][0].offset - fit_parameters[-1][0].amplitude) -
(fit_parameters[0][0].offset + fit_parameters[0][0].amplitude)) / 2 + fit_parameters[0][0].offset
center_y = ((fit_parameters[-1][1].offset - fit_parameters[-1][1].amplitude) -
(fit_parameters[0][1].offset + fit_parameters[0][1].amplitude)) + fit_parameters[0][1].offset
# resting positions fall when y is minimized
resting_y = [dot[Y].offset + dot[Y].amplitude for dot in fit_parameters]
resting_x = [xfit.eval(yfit.period/(2*pi) * (pi/2 - yfit.phase)) for (xfit, yfit) in fit_parameters]
# extended positions fall where y is maximized
extended_y = [dot[Y].offset - dot[Y].amplitude for dot in fit_parameters]
extended_x = [xfit.eval(yfit.period/(2*pi) * (3*pi/2 - yfit.phase)) for (xfit, yfit) in fit_parameters]
return (center_x, center_y, resting_x, resting_y, extended_x, extended_y)
def process_coordinates_from_data(fit_parameters, frames, dt):
"""(center_x, center_y, resting_x, resting_y, extended_x, extended_y)
finds the resting and extended position for each dot, using the data."""
X, Y = 0, 1
center_x = ((fit_parameters[-1][0].offset - fit_parameters[-1][0].amplitude) -
(fit_parameters[0][0].offset + fit_parameters[0][0].amplitude)) / 2 + fit_parameters[0][0].offset
center_y = ((fit_parameters[-1][1].offset - fit_parameters[-1][1].amplitude) -
(fit_parameters[0][1].offset + fit_parameters[0][1].amplitude)) + fit_parameters[0][1].offset
# resting y positions fall when y is maximized, at t = period (pi/2 - phase) / (2 pi)
# (this is because of our choice of coordinate system, where extension is up, towards 0)
N = len(frames)
y_max_t = [np.arange(start=yfit.period * (pi/2-yfit.phase) / (2*pi),
stop=N*dt,
step=yfit.period) for _, yfit in fit_parameters]
y_max_t = [a[a > 0] for a in y_max_t]
# extended y positions fall when y is minimized, at t = period (3 pi / 2 - phase) / (2 pi)
y_min_t = [np.arange(start=yfit.period * (3*pi/2-yfit.phase) / (2*pi),
stop=N*dt,
step=yfit.period) for _, yfit in fit_parameters]
y_min_t = [a[a > 0] for a in y_min_t]
y_max_i = [np.rint(yt / dt).astype(int) for yt in y_max_t]
y_min_i = [np.rint(yt / dt).astype(int) for yt in y_min_t]
resting_x, resting_y = [], []
extended_x, extended_y = [], []
n_dots = len(frames[0])
for dot in range(n_dots):
extended_x.append(mean([frames[i][dot].xpos for i in y_min_i[dot]]))
extended_y.append(mean([frames[i][dot].ypos for i in y_min_i[dot]]))
resting_x.append(mean([frames[i][dot].xpos for i in y_max_i[dot]]))
resting_y.append(mean([frames[i][dot].ypos for i in y_max_i[dot]]))
return (center_x, center_y, resting_x, resting_y, extended_x, extended_y)
def find_center_by_frame(frames):
return [(np.mean([dot.xpos for dot in frame]),
np.mean([dot.ypos for dot in frame])) for frame in frames]
def recenter(frames):
"""frames, [[residuals_x, residuals_y]] = recenter(frames)
If the center of the device moved while your movie was running, that would
be bad. But if you were providing a symmetric stretch, we can correct for
it. This function makes sure that all frames have the same center point.
It works on the assumption that (mean(x), mean(y)) is always the true
center of the device. It computes an x offset and y offset value for each
frame and then adds the same offset to all points within a frame to
recenter it."""
frames = deepcopy(frames)
center_by_frame = find_center_by_frame(frames)
center_x, center_y = center_by_frame[0]
centers_x, centers_y = zip(*center_by_frame)
# show displacement of center from center_x, _y
centers_x = [frame_pos - center_x for frame_pos in centers_x]
centers_y = [frame_pos - center_y for frame_pos in centers_y]
# add dx, dy to each dot in each frame
for i, frame in enumerate(frames):
for dot in frame:
dot.xpos -= centers_x[i]
dot.ypos -= centers_y[i]
return frames, [centers_x, centers_y]
def calculate_peak_strain(fit_parameters, resting_x, resting_y, extended_x, extended_y):
# calculate strain at peak extension
n_dots = len(fit_parameters)
strain_x = []
strain_y = []
# now, compute the discrete derivative in each axis
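    # engineering strain (change in dot spacing / resting spacing) per axis:
    # forward difference on the first row/column, backward on the last,
    # central difference for interior dots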
n = int(sqrt(n_dots))
for di in xrange(n_dots):
# in x
if di % n == 0: # left edge
strain_x.append((extended_x[di+1]-extended_x[di] - (resting_x[di+1]-resting_x[di])) /
(resting_x[di+1]-resting_x[di]))
elif di % n == (n-1): # right edge
strain_x.append((extended_x[di]-extended_x[di-1] - (resting_x[di]-resting_x[di-1])) /
(resting_x[di]-resting_x[di-1]))
else: # in the center
strain_x.append((extended_x[di+1]-extended_x[di-1] - (resting_x[di+1]-resting_x[di-1])) /
(resting_x[di+1]-resting_x[di-1]))
# in y
if di < n: # top row
strain_y.append((extended_y[di+n]-extended_y[di] - (resting_y[di+n]-resting_y[di])) /
(resting_y[di+n]-resting_y[di]))
elif di >= n_dots-n: # bottom row
strain_y.append((extended_y[di]-extended_y[di-n] - (resting_y[di]-resting_y[di-n])) /
(resting_y[di]-resting_y[di-n]))
else: # in the center
strain_y.append((extended_y[di+n]-extended_y[di-n] - (resting_y[di+n]-resting_y[di-n])) /
(resting_y[di+n]-resting_y[di-n]))
return (strain_x, strain_y)
def write_plots(frames, fit_parameters, jitter, directory, peak_strain, dt,
min_strain=-0.1, max_strain=0.3):
# draw residual plots for each sine fit in x and y
t = np.arange(len(frames)) * dt
fit = lambda t, sf: sf.eval(t)
# TODO show phase for each regression
# plot the sine fits first
for idot in xrange(len(frames[0])):
actual_x = [frame[idot].xpos for frame in frames]
actual_y = [frame[idot].ypos for frame in frames]
fit_x, fit_y = fit_parameters[idot]
plt.clf()
plt.plot(t, actual_x, 'b.', t, fit(t, fit_x), 'b-')
plt.plot(t, actual_y, 'r.', t, fit(t, fit_y), 'r-')
plt.title('Dot %d' % idot)
plt.xlabel('Time (s)')
plt.ylabel('Displacement (px)')
# plt.legend(['in X', 'fit in X', 'in Y', 'fit in Y'])
axes = plt.gca()
axes.text(0.95, 0.5,
(r'x: $%.2f sin(\frac{2 \pi}{%.2f} t + %.2f) + %.2f$; $R^2=%.4f$' '\n'
r'y: $%.2f sin(\frac{2 \pi}{%.2f} t + %.2f) + %.2f$; $R^2=%.4f$'
% (fit_x.amplitude, fit_x.period, fit_x.phase, fit_x.offset, fit_x.r2,
fit_y.amplitude, fit_y.period, fit_y.phase, fit_y.offset, fit_y.r2)),
verticalalignment='center', horizontalalignment='right', transform=axes.transAxes)
plt.savefig('%s/dot_%04d_fit.png' % (directory, idot))
# plot the resting and extended coordinates
(center_x, center_y, resting_x, resting_y, extended_x, extended_y) = process_coordinates(fit_parameters)
plt.clf()
plt.axis([min(extended_x+resting_x)-50, max(extended_x+resting_x)+50,
max(extended_y+resting_y)+50, min(extended_y+resting_y)-50])
plt.quiver(resting_x, resting_y,
[ext-rest for (ext, rest) in zip(extended_x, resting_x)],
[ext-rest for (ext, rest) in zip(extended_y, resting_y)],
units='xy', angles='xy', scale=1.0)
plt.savefig('%s/coordinates.png' % directory)
# plot coordinate system jitter
plt.clf()
plt.plot(t, jitter[0], t, jitter[1])
plt.legend(['x', 'y'])
plt.savefig('%s/center_displacement.png' % directory)
plt.clf()
center_by_frame = find_center_by_frame(frames)
plt.plot(t, zip(*center_by_frame)[0], t, zip(*center_by_frame)[1])
plt.savefig('%s/center_position_post.png' % directory)
n = int(sqrt(len(fit_parameters)))
min_strain = min_strain or min(peak_strain[0] + peak_strain[1])
max_strain = max_strain or max(peak_strain[0] + peak_strain[1])
matrix = lambda axis: np.array(peak_strain[axis]).reshape(n, n)
for (axis, label) in [(0, 'x'), (1, 'y')]:
plt.clf()
plt.pcolor(matrix(axis), edgecolor='k', vmin=min_strain, vmax=max_strain)
for i in range(n):
for j in range(n):
plt.text(i+0.5, j+0.5, "%.4f" % matrix(axis)[j, i],
horizontalalignment='center',
verticalalignment='center')
ax = plt.gca()
ax.set_ylim(ax.get_ylim()[::-1])
plt.colorbar(ticks=[-.05, 0, .05, .1, .15, .2, .25])
plt.savefig('%s/peakstrain_%s.png' % (directory, label))
f = open('%s/index.html' % directory, 'w')
print >> f, "<!DOCTYPE html>\n<html><head><title>Regression results</title></head><body>"
print >> f, '<h1>Dot positions</h1><img src="coordinates.png" />'
print >> f, ('<h1>Center displacement (pre-correction)</h1>'
'<img src="center_displacement.png" />')
print >> f, ('<h1>Center position (post-correction)</h1>'
'<img src="center_position_post.png" />')
print >> f, ('<h1>Peak strain: x</h1>'
'<img src="peakstrain_x.png" />'
'<p>Mean peak x strain: %f Standard deviation: %f</p>'
% (np.mean(peak_strain[0]), np.std(peak_strain[0])))
print >> f, ('<h1>Peak strain: y</h1>'
'<img src="peakstrain_y.png" />'
'<p>Mean peak y strain: %f Standard deviation: %f</p>'
% (np.mean(peak_strain[1]), np.std(peak_strain[1])))
for idot in xrange(len(frames[0])):
print >> f, '<h1>Dot %d</h1><img src="dot_%04d_fit.png" />' % (idot, idot)
print >> f, '</body></html>'
f.close()
def main():
sin_functions = {
'sine': np.sin,
'sawtooth': lambda x: sawtooth(x-3*pi/2, width=0.5),
}
parser = argparse.ArgumentParser()
parser.add_argument('infile')
parser.add_argument('outpath')
parser.add_argument('--overwrite', '-O', action='store_true',
help="Don't complain if outpath already exists")
parser.add_argument('--fit-function', '-f',
help='Choose a function to fit the waveforms',
choices=sin_functions.keys(), default='sine')
parser.add_argument(
'--strains-from', '-s',
choices=['fit', 'data'], default='fit',
help=("Choose whether strains will be computed from the peaks of the "
"fit function (fit; default) or from the average of the observed "
"strains at the extended position determined from the fits (data)."))
parser.add_argument('--fps', type=float, default=30.)
args = parser.parse_args()
try:
os.makedirs(args.outpath) # do this first so we aren't surprised later
except OSError:
if not args.overwrite:
print >> sys.stderr, "Output path exists. Use --overwrite to run anyway."
sys.exit(1)
f = open(args.infile, 'rU')
frames = parse_mtrack2(f)
f.close()
centered_frames, jitter = recenter(frames)
fit_parameters = sinefit(frames, dt=1./args.fps, sin=sin_functions[args.fit_function])
if args.strains_from == 'fit':
(_center_x, _center_y, resting_x, resting_y, extended_x, extended_y) = \
process_coordinates(fit_parameters)
elif args.strains_from == 'data':
(_center_x, _center_y, resting_x, resting_y, extended_x, extended_y) = \
process_coordinates_from_data(fit_parameters, frames, 1./args.fps)
else:
raise ValueError
peak_strain = calculate_peak_strain(fit_parameters, resting_x, resting_y, extended_x, extended_y)
write_plots(frames, fit_parameters, jitter, args.outpath, peak_strain,
dt=1./args.fps, min_strain=-0.05, max_strain=0.25)
if __name__ == '__main__':
main()
| mit |
LiaoPan/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
LumenResearch/heatmappy | heatmappy/heatmap.py | 1 | 7675 | from abc import ABCMeta, abstractmethod
from functools import partial
import io
import os
import random
from matplotlib.colors import LinearSegmentedColormap
import numpy
from PIL import Image
try:
from PySide import QtCore, QtGui
except ImportError:
pass
_asset_file = partial(os.path.join, os.path.dirname(__file__), 'assets')
def _img_to_opacity(img, opacity):
img = img.copy()
alpha = img.split()[3]
alpha = alpha.point(lambda p: int(p * opacity))
img.putalpha(alpha)
return img
class Heatmapper:
def __init__(self, point_diameter=50, point_strength=0.2, opacity=0.65,
colours='default',
grey_heatmapper='PIL'):
"""
:param opacity: opacity (between 0 and 1) of the generated heatmap overlay
:param colours: Either 'default', 'reveal',
OR the path to horizontal image which will be converted to a scale
OR a matplotlib LinearSegmentedColorMap instance.
:param grey_heatmapper: Required to draw points on an image as a greyscale
heatmap. If not using the default, this must be an object
which fulfils the GreyHeatmapper interface.
"""
self.opacity = opacity
self._colours = None
self.colours = colours
if grey_heatmapper == 'PIL':
self.grey_heatmapper = PILGreyHeatmapper(point_diameter, point_strength)
elif grey_heatmapper == 'PySide':
self.grey_heatmapper = PySideGreyHeatmapper(point_diameter, point_strength)
else:
self.grey_heatmapper = grey_heatmapper
@property
def colours(self):
return self._colours
@colours.setter
def colours(self, colours):
self._colours = colours
if isinstance(colours, LinearSegmentedColormap):
self._cmap = colours
else:
files = {
'default': _asset_file('default.png'),
'reveal': _asset_file('reveal.png'),
}
scale_path = files.get(colours) or colours
self._cmap = self._cmap_from_image_path(scale_path)
@property
def point_diameter(self):
return self.grey_heatmapper.point_diameter
@point_diameter.setter
def point_diameter(self, point_diameter):
self.grey_heatmapper.point_diameter = point_diameter
@property
def point_strength(self):
return self.grey_heatmapper.point_strength
@point_strength.setter
def point_strength(self, point_strength):
self.grey_heatmapper.point_strength = point_strength
def heatmap(self, width, height, points, base_path=None, base_img=None):
"""
:param points: sequence of tuples of (x, y), eg [(9, 20), (7, 3), (19, 12)]
        :return: If base_path or base_img is provided, a heat map from the given points
                 is overlaid on the image. Otherwise, the heat map alone is returned
with a transparent background.
"""
heatmap = self.grey_heatmapper.heatmap(width, height, points)
heatmap = self._colourised(heatmap)
heatmap = _img_to_opacity(heatmap, self.opacity)
if base_path:
background = Image.open(base_path)
return Image.alpha_composite(background.convert('RGBA'), heatmap)
elif base_img is not None:
return Image.alpha_composite(base_img.convert('RGBA'), heatmap)
else:
return heatmap
def heatmap_on_img_path(self, points, base_path):
width, height = Image.open(base_path).size
return self.heatmap(width, height, points, base_path=base_path)
def heatmap_on_img(self, points, img):
width, height = img.size
return self.heatmap(width, height, points, base_img=img)
def _colourised(self, img):
""" maps values in greyscale image to colours """
arr = numpy.array(img)
rgba_img = self._cmap(arr, bytes=True)
return Image.fromarray(rgba_img)
@staticmethod
def _cmap_from_image_path(img_path):
img = Image.open(img_path)
img = img.resize((256, img.height))
colours = (img.getpixel((x, 0)) for x in range(256))
colours = [(r/255, g/255, b/255, a/255) for (r, g, b, a) in colours]
return LinearSegmentedColormap.from_list('from_image', colours)
class GreyHeatMapper(metaclass=ABCMeta):
@abstractmethod
def __init__(self, point_diameter, point_strength):
self.point_diameter = point_diameter
self.point_strength = point_strength
@abstractmethod
def heatmap(self, width, height, points):
"""
:param points: sequence of tuples of (x, y), eg [(9, 20), (7, 3), (19, 12)]
:return: a white image of size width x height with black areas painted at
the given points
"""
pass
class PySideGreyHeatmapper(GreyHeatMapper):
def __init__(self, point_diameter, point_strength):
super().__init__(point_diameter, point_strength)
self.point_strength = int(point_strength * 255)
def heatmap(self, width, height, points):
base_image = QtGui.QImage(width, height, QtGui.QImage.Format_ARGB32)
base_image.fill(QtGui.QColor(255, 255, 255, 255))
self._paint_points(base_image, points)
return self._qimage_to_pil_image(base_image).convert('L')
def _paint_points(self, img, points):
painter = QtGui.QPainter(img)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
pen = QtGui.QPen(QtGui.QColor(0, 0, 0, 0))
pen.setWidth(0)
painter.setPen(pen)
for point in points:
self._paint_point(painter, *point)
painter.end()
def _paint_point(self, painter, x, y):
grad = QtGui.QRadialGradient(x, y, self.point_diameter/2)
grad.setColorAt(0, QtGui.QColor(0, 0, 0, max(self.point_strength, 0)))
grad.setColorAt(1, QtGui.QColor(0, 0, 0, 0))
brush = QtGui.QBrush(grad)
painter.setBrush(brush)
painter.drawEllipse(
x - self.point_diameter/2,
y - self.point_diameter/2,
self.point_diameter,
self.point_diameter
)
@staticmethod
def _qimage_to_pil_image(qimg):
buffer = QtCore.QBuffer()
buffer.open(QtCore.QIODevice.ReadWrite)
qimg.save(buffer, "PNG")
bytes_io = io.BytesIO()
bytes_io.write(buffer.data().data())
buffer.close()
bytes_io.seek(0)
return Image.open(bytes_io)
class PILGreyHeatmapper(GreyHeatMapper):
def __init__(self, point_diameter, point_strength):
super().__init__(point_diameter, point_strength)
def heatmap(self, width, height, points):
heat = Image.new('L', (width, height), color=255)
dot = (Image.open(_asset_file('450pxdot.png')).copy()
.resize((self.point_diameter, self.point_diameter), resample=Image.ANTIALIAS))
dot = _img_to_opacity(dot, self.point_strength)
for x, y in points:
x, y = int(x - self.point_diameter/2), int(y - self.point_diameter/2)
heat.paste(dot, (x, y), dot)
return heat
if __name__ == '__main__':
randpoint = lambda max_x, max_y: (random.randint(0, max_x), random.randint(0, max_y))
example_img = Image.open(_asset_file('cat.jpg'))
example_points = (randpoint(*example_img.size) for _ in range(500))
heatmapper = Heatmapper(colours='default')
heatmapper.colours = 'reveal'
heatmapper.heatmap_on_img(example_points, example_img).save('out.png')
| mit |
sherlockcoin/Electrum-NavCoin | plugins/plot/qt.py | 11 | 3548 | from PyQt4.QtGui import *
from electrum.plugins import BasePlugin, hook
from electrum.i18n import _
import datetime
from electrum.util import format_satoshis
from electrum.bitcoin import COIN
try:
import matplotlib.pyplot as plt
import matplotlib.dates as md
from matplotlib.patches import Ellipse
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
flag_matlib=True
except:
flag_matlib=False
class Plugin(BasePlugin):
def is_available(self):
if flag_matlib:
return True
else:
return False
@hook
def export_history_dialog(self, window, hbox):
wallet = window.wallet
history = wallet.get_history()
if len(history) > 0:
b = QPushButton(_("Preview plot"))
hbox.addWidget(b)
b.clicked.connect(lambda: self.do_plot(wallet, history))
else:
b = QPushButton(_("No history to plot"))
hbox.addWidget(b)
def do_plot(self, wallet, history):
balance_Val=[]
fee_val=[]
value_val=[]
datenums=[]
unknown_trans = 0
pending_trans = 0
counter_trans = 0
balance = 0
for item in history:
tx_hash, confirmations, value, timestamp, balance = item
if confirmations:
if timestamp is not None:
try:
datenums.append(md.date2num(datetime.datetime.fromtimestamp(timestamp)))
balance_Val.append(1000.*balance/COIN)
except [RuntimeError, TypeError, NameError] as reason:
unknown_trans += 1
pass
else:
unknown_trans += 1
else:
pending_trans += 1
value_val.append(1000.*value/COIN)
if tx_hash:
label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
f, axarr = plt.subplots(2, sharex=True)
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
x=19
test11="Unknown transactions = "+str(unknown_trans)+" Pending transactions = "+str(pending_trans)+" ."
box1 = TextArea(" Test : Number of pending transactions", textprops=dict(color="k"))
box1.set_text(test11)
box = HPacker(children=[box1],
align="center",
pad=0.1, sep=15)
anchored_box = AnchoredOffsetbox(loc=3,
child=box, pad=0.5,
frameon=True,
bbox_to_anchor=(0.5, 1.02),
bbox_transform=ax.transAxes,
borderpad=0.5,
)
ax.add_artist(anchored_box)
plt.ylabel('mBTC')
plt.xlabel('Dates')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[0].plot(datenums,balance_Val,marker='o',linestyle='-',color='blue',label='Balance')
axarr[0].legend(loc='upper left')
axarr[0].set_title('History Transactions')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[1].plot(datenums,value_val,marker='o',linestyle='-',color='green',label='Value')
axarr[1].legend(loc='upper left')
# plt.annotate('unknown transaction = %d \n pending transactions = %d' %(unknown_trans,pending_trans),xy=(0.7,0.05),xycoords='axes fraction',size=12)
plt.show()
| mit |
h2educ/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
andrewstuart/data-science-from-scratch | code/clustering.py | 60 | 6438 | from __future__ import division
from linear_algebra import squared_distance, vector_mean, distance
import math, random
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
class KMeans:
"""performs k-means clustering"""
def __init__(self, k):
self.k = k # number of clusters
self.means = None # means of clusters
def classify(self, input):
"""return the index of the cluster closest to the input"""
return min(range(self.k),
key=lambda i: squared_distance(input, self.means[i]))
def train(self, inputs):
self.means = random.sample(inputs, self.k)
assignments = None
while True:
# Find new assignments
new_assignments = map(self.classify, inputs)
# If no assignments have changed, we're done.
if assignments == new_assignments:
return
# Otherwise keep the new assignments,
assignments = new_assignments
for i in range(self.k):
i_points = [p for p, a in zip(inputs, assignments) if a == i]
# avoid divide-by-zero if i_points is empty
if i_points:
self.means[i] = vector_mean(i_points)
def squared_clustering_errors(inputs, k):
"""finds the total squared error from k-means clustering the inputs"""
clusterer = KMeans(k)
clusterer.train(inputs)
means = clusterer.means
assignments = map(clusterer.classify, inputs)
return sum(squared_distance(input,means[cluster])
for input, cluster in zip(inputs, assignments))
def plot_squared_clustering_errors(plt):
ks = range(1, len(inputs) + 1)
errors = [squared_clustering_errors(inputs, k) for k in ks]
plt.plot(ks, errors)
plt.xticks(ks)
plt.xlabel("k")
plt.ylabel("total squared error")
plt.show()
#
# using clustering to recolor an image
#
def recolor_image(input_file, k=5):
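    # posterise the image to k colours: run k-means on the pixel colour values,
    # then repaint each pixel with its cluster's mean colour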
    img = mpimg.imread(input_file)
pixels = [pixel for row in img for pixel in row]
clusterer = KMeans(k)
clusterer.train(pixels) # this might take a while
def recolor(pixel):
cluster = clusterer.classify(pixel) # index of the closest cluster
return clusterer.means[cluster] # mean of the closest cluster
new_img = [[recolor(pixel) for pixel in row]
for row in img]
plt.imshow(new_img)
plt.axis('off')
plt.show()
#
# hierarchical clustering
#
def is_leaf(cluster):
"""a cluster is a leaf if it has length 1"""
return len(cluster) == 1
def get_children(cluster):
"""returns the two children of this cluster if it's a merged cluster;
raises an exception if this is a leaf cluster"""
if is_leaf(cluster):
raise TypeError("a leaf cluster has no children")
else:
return cluster[1]
def get_values(cluster):
"""returns the value in this cluster (if it's a leaf cluster)
or all the values in the leaf clusters below it (if it's not)"""
if is_leaf(cluster):
return cluster # is already a 1-tuple containing value
else:
return [value
for child in get_children(cluster)
for value in get_values(child)]
def cluster_distance(cluster1, cluster2, distance_agg=min):
"""finds the aggregate distance between elements of cluster1
and elements of cluster2"""
return distance_agg([distance(input1, input2)
for input1 in get_values(cluster1)
for input2 in get_values(cluster2)])
def get_merge_order(cluster):
if is_leaf(cluster):
return float('inf')
else:
return cluster[0] # merge_order is first element of 2-tuple
def bottom_up_cluster(inputs, distance_agg=min):
# start with every input a leaf cluster / 1-tuple
clusters = [(input,) for input in inputs]
# as long as we have more than one cluster left...
while len(clusters) > 1:
# find the two closest clusters
c1, c2 = min([(cluster1, cluster2)
for i, cluster1 in enumerate(clusters)
for cluster2 in clusters[:i]],
key=lambda (x, y): cluster_distance(x, y, distance_agg))
# remove them from the list of clusters
clusters = [c for c in clusters if c != c1 and c != c2]
# merge them, using merge_order = # of clusters left
merged_cluster = (len(clusters), [c1, c2])
# and add their merge
clusters.append(merged_cluster)
# when there's only one cluster left, return it
return clusters[0]
def generate_clusters(base_cluster, num_clusters):
# start with a list with just the base cluster
clusters = [base_cluster]
# as long as we don't have enough clusters yet...
while len(clusters) < num_clusters:
# choose the last-merged of our clusters
next_cluster = min(clusters, key=get_merge_order)
# remove it from the list
clusters = [c for c in clusters if c != next_cluster]
# and add its children to the list (i.e., unmerge it)
clusters.extend(get_children(next_cluster))
# once we have enough clusters...
return clusters
if __name__ == "__main__":
inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]
random.seed(0) # so you get the same results as me
clusterer = KMeans(3)
clusterer.train(inputs)
print "3-means:"
print clusterer.means
print
random.seed(0)
clusterer = KMeans(2)
clusterer.train(inputs)
print "2-means:"
print clusterer.means
print
print "errors as a function of k"
for k in range(1, len(inputs) + 1):
print k, squared_clustering_errors(inputs, k)
print
print "bottom up hierarchical clustering"
base_cluster = bottom_up_cluster(inputs)
print base_cluster
print
print "three clusters, min:"
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
print
print "three clusters, max:"
base_cluster = bottom_up_cluster(inputs, max)
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
| unlicense |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.