repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_dbn.py | 2 | 4191 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.cross_validation import cross_val_score
from sklearn import datasets
from sklearn.metrics import f1_score
from ..dataset import Dataset
def pytest_funcarg__digits(request):
digits = datasets.load_digits()
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
ds = Dataset(data, digits.target).scale()
ds.test_size = 0.5
return ds.train_test_split()
def pytest_funcarg__iris(request):
iris = datasets.load_iris()
ds = Dataset(iris.data, iris.target).scale()
return ds
def test_callback(digits):
from ..dbn import DBN
fine_tune_call_args = []
pretrain_call_args = []
def fine_tune_callback(net, epoch):
fine_tune_call_args.append((net, epoch))
def pretrain_callback(net, epoch, layer):
pretrain_call_args.append((net, epoch, layer))
X_train, X_test, y_train, y_test = digits
clf = DBN(
[X_train.shape[1], 4, 10],
epochs=3,
epochs_pretrain=2,
use_re_lu=False,
fine_tune_callback=fine_tune_callback,
pretrain_callback=pretrain_callback,
)
clf.fit(X_train, y_train)
assert fine_tune_call_args == [
(clf, 1), (clf, 2), (clf, 3)]
assert pretrain_call_args == [
(clf, 1, 0), (clf, 2, 0), (clf, 1, 1), (clf, 2, 1)]
def test_errors(digits):
from ..dbn import DBN
X_train, X_test, y_train, y_test = digits
clf = DBN(
[-1, 4, 10],
epochs=3,
epochs_pretrain=3,
use_re_lu=False,
)
clf.fit(X_train, y_train)
assert len(clf.errors_pretrain_) == 2
assert len(clf.errors_pretrain_[0]) == 3
assert len(clf.errors_pretrain_[1]) == 3
assert len(clf.errors_fine_tune_) == 3
assert len(clf.losses_fine_tune_) == 3
def test_labelencoding(iris):
from ..dbn import DBN
X_train, X_test, y_train, y_test = iris.train_test_split()
y_train = y_train * 1.3
y_test = y_test * 1.3
clf = DBN(
[-1, 4, -1],
epochs=10,
)
clf.fit(X_train, y_train)
predicted_labels = np.unique(clf.predict(X_test))
assert len(predicted_labels) > 1
for label in predicted_labels:
# predicted labels should correspond to what's in y_test
assert label in y_test
def test_functional_iris(iris):
from ..dbn import DBN
X_train, X_test, y_train, y_test = iris.train_test_split()
clf = DBN(
[-1, 4, 3],
learn_rates=0.3,
output_act_funct='Linear',
epochs=50,
)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert scores.mean() > 0.85
def test_functional_digits_no_pretrain(digits):
from ..dbn import DBN
X_train, X_test, y_train, y_test = digits
clf = DBN(
[64, 32, 10],
verbose=0,
)
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
assert f1_score(y_test, predicted) > 0.9
assert 0.9 < clf.score(X_test, y_test) < 1.0
def test_functional_digits_with_pretrain(digits):
from ..dbn import DBN
X_train, X_test, y_train, y_test = digits
clf = DBN(
[64, 32, 10],
epochs_pretrain=10,
use_re_lu=False,
verbose=0,
)
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
assert f1_score(y_test, predicted) > 0.9
assert 0.9 < clf.score(X_test, y_test) < 1.0
def test_sparse_support(digits):
from ..dbn import DBN
X_train, X_test, y_train, y_test = digits
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
clf = DBN(
[64, 32, 10],
epochs_pretrain=10,
use_re_lu=False,
verbose=0,
)
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
assert f1_score(y_test, predicted) > 0.9
assert 0.9 < clf.score(X_test, y_test) < 1.0
def test_layer_sizes_auto(iris):
from ..dbn import DBN
X_train, X_test, y_train, y_test = iris.train_test_split()
clf = DBN(
[-1, 4, -1],
)
clf.fit(X_train, y_train)
assert clf.net_.weights[0].shape == (4, 4)
assert clf.net_.weights[1].shape == (4, 3)
| bsd-3-clause |
meduz/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 168 | 2088 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
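# Quick illustration (added note, not part of the original example): for two
# 1d samples x_1 = 1 and x_2 = 2, PolynomialFeatures(degree=3) builds exactly
# the Vandermonde-style matrix described in the docstring above,
# i.e. one row [1, x, x**2, x**3] per sample:
#   [[1., 1., 1., 1.],
#    [1., 2., 4., 8.]]
# (_vandermonde_demo is just an illustrative name)
_vandermonde_demo = PolynomialFeatures(degree=3).fit_transform(np.array([[1.], [2.]]))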
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
simpeg/processing | processing/DCIPtools/sectionPlotting.py | 1 | 4283 | import matplotlib.pyplot as plt
import numpy as np
import JDataObject as Jdata
import pylab as plt
from scipy.interpolate import griddata
################################################################
# define the file required for import
fileName = "/Users/juan/Documents/testData/DAT_B.DAT"
unitType = "appResistivity"
# unitType = "appChareability"
xp = []
yp = []
val = []
val2 = []
Nlevel = ["N = 1", "N = 2", "N = 3", "N = 4", "N = 5", "N = 6", "N = 7"]
z_n = [-100, -200, -300, -400, -500, -600, -700]
vmin_rho, vmax_rho = 10, 4000
vmin_mx, vmax_mx = 0, 18
# =================================================================
# Code Start
patch = Jdata.loadDias(fileName) # loads data
# calculated mid-pt data points
for src in range(len(patch.readings)):
for rx in range(len(patch.readings[src].Vdp)):
xp.append(
patch.readings[src].Vdp[rx].getXplotpoint(patch.readings[src].Idp))
yp.append(
patch.readings[src].Vdp[rx].getZplotpoint(patch.readings[src].Idp))
val.append(
np.abs(patch.readings[src].Vdp[rx].Rho))
val2.append(
np.abs(patch.readings[src].Vdp[rx].Mx))
# convert to numpy
midx = np.asarray(xp)
midz = np.asarray(yp)
rho = np.asarray(val)
mx = np.asarray(val2)
xNLevel = np.min(midx) - 200
x_n = np.zeros(len(z_n))
x_n = x_n + xNLevel
# Grid points
grid_x, grid_z = np.mgrid[np.min(midx):np.max(midx),
np.min(midz):np.max(midz)]
# create an axes to plot on
ax = plt.subplot(2, 1, 1, aspect='equal')
# create an axes to plot on
ax2 = plt.subplot(2, 1, 2, aspect='equal')
# check which data to plot
# if unitType == "appResistivity":
vmin = vmin_rho
vmax = vmax_rho
ax.axes.set_title("Apparent Resistivity (ohm-m)", y=1.14)
grid_rho = griddata(np.c_[midx, midz], rho.T, (grid_x, grid_z),
method='cubic')
grid_rho = np.ma.masked_where(np.isnan(grid_rho), grid_rho)
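# Note (added comment): griddata with method='cubic' returns NaN outside the
# convex hull of the scattered mid-points; masking those cells keeps the
# contour and pcolormesh calls below from drawing artefacts in that region.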
name = 'custom_div_cmap'
pcolorOpts = {}
from matplotlib.colors import LinearSegmentedColormap
custom_map = LinearSegmentedColormap.from_list(name=name, colors=['aqua', [0, 0.85, 1, 1], [0.1, 1, 0.1, 1], 'yellow', [1, 0.7, 0, 1], [1, 0.2, 0.2, 1], [0.95, 0.9, 1, 1]], N=200)
CS = ax.contour(grid_x[:, 0], grid_z[0, :], grid_rho.T, 15, linewidths=0.5, colors='k')
ph = ax.pcolormesh(grid_x[:, 0], grid_z[0, :], grid_rho.T, cmap=custom_map, clim=(vmin, vmax), vmin=vmin, vmax=vmax, **pcolorOpts)
# , vmin=vmin, vmax=vmax, {})
cbar = plt.colorbar(ph, ax=ax, format="%.0f", fraction=0.04, orientation="vertical")
cbar.set_label("App.Res.", size=12)
for i, txt in enumerate(val):
ax.annotate(int(rho[i]), (midx[i], midz[i]), size=8)
# else:
vmin = vmin_mx
vmax = vmax_mx
ax2.axes.set_title("Apparent Chargeability (mV/V)", y=1.11)
grid_mx = griddata(np.c_[midx, midz], mx.T, (grid_x, grid_z),
method='cubic')
grid_mx = np.ma.masked_where(np.isnan(grid_mx), grid_mx)
name = 'custom_div_cmap1'
pcolorOpts = {}
# from matplotlib.colors import LinearSegmentedColormap
# custom_map = LinearSegmentedColormap.from_list(name=name, colors=['aqua', [0, 0.85, 1, 1], [0.1, 1, 0.1, 1], 'yellow', [1, 0.7, 0, 1], [1, 0.2, 0.2, 1], [0.95, 0.9, 1, 1]], N=200)
CS2 = ax2.contour(grid_x[:, 0], grid_z[0, :], grid_mx.T, 15, linewidths=0.5, colors='k')
ph2 = ax2.pcolormesh(grid_x[:, 0], grid_z[0, :], grid_mx.T, cmap=custom_map, clim=(vmin, vmax), vmin=vmin, vmax=vmax, **pcolorOpts)
# , vmin=vmin, vmax=vmax, {})
cbar2 = plt.colorbar(ph2, ax=ax2, format="%.0f", fraction=0.04, orientation="vertical")
cbar2.set_label("App.Mx.", size=12)
for i, txt in enumerate(val):
ax2.annotate(int(mx[i]), (midx[i], midz[i]), size=8)
for i, txt in enumerate(Nlevel):
ax.annotate(Nlevel[i], (x_n[i], z_n[i]), size=9)
ax2.annotate(Nlevel[i], (x_n[i], z_n[i]), size=9)
ax.axes.get_yaxis().set_visible(False)
ax.axes.set_ylim(np.min(midz) - 50, np.max(midz) + 50)
ax.axes.set_xlim(np.min(midx) - 250, np.max(midx) + 100)
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.tick_params(labelsize=8)
ax2.axes.get_yaxis().set_visible(False)
ax2.axes.set_ylim(np.min(midz) - 50, np.max(midz) + 50)
ax2.axes.set_xlim(np.min(midx) - 250, np.max(midx) + 100)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
ax2.tick_params(labelsize=8)
plt.show()
| mit |
pylayers/pylayers | pylayers/antprop/antenna.py | 1 | 183342 | # -*- coding:Utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.antenna
.. autosummary::
:members:
"""
from __future__ import print_function
import doctest
import os
import glob
import re
import pdb
import sys
import numpy as np
import scipy.linalg as la
import matplotlib.pylab as plt
from scipy import io
from matplotlib import rc
from matplotlib import cm # colormaps
from mpl_toolkits.mplot3d import axes3d
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MaxNLocator
from scipy.special import sici, fresnel
import pandas as pd
import pylayers.util.pyutil as pyu
import pylayers.util.geomutil as geu
from pylayers.util.project import PyLayers
from pylayers.antprop.spharm import *
from pylayers.antprop.antssh import ssh, SSHFunc2, SSHFunc, SSHCoeff, CartToSphere
from pylayers.antprop.coeffModel import *
import copy
from mayavi import mlab
try:
from pylayers.antprop.antvsh import vsh
except:
pass
import PIL.Image as Image
class Pattern(PyLayers):
""" Class Pattern
MetaClass of Antenna
A pattern is evaluated with the 3 np.array parameters
theta
phi
fGHz
This class implements pattern methods.
The name of a pattern method starts with p.
Each pattern method takes a single dictionary argument 'param'.
If self.grid is True the pattern dimensions are
Nt x Np x Nf
otherwise they are
Ndir x Nf
"""
def __init__(self):
PyLayers.__init__(self)
self.grid = False
self.evaluated = False
self.full = False
def eval(self, **kwargs):
""" evaluate pattern functions
Parameters
----------
th: np.array
if this array is present it means grid = False
ph: np.array
pt : np.array (3,N)
pr : np.array (3,N)
azoffset : int (0)
Rfloor:bool
if True, add the gain value of the ray reflected on the floor.
Values are appended at the end of sqG.
fGHz:list
[]
nth: int
90
nph: int
181
first: boolean
True if first call (to define self.param)
th0 : float
theta initial value
th1 : float
theta finale value
ph0 : float
phi initial value
ph1 : float
phi final value
Examples
--------
>>> from pylayers.antprop.aarray import *
>>> A0=Antenna('Omni', param={'pol':'t','GmaxdB':0})
>>> A1=Antenna('Gauss')
>>> A2=Antenna('3gpp')
>>> A3=ULArray()
>>> A0.eval()
>>> A1.eval()
>>> A2.eval()
>>> #A3.eval()
"""
defaults = {'Rfloor': False,
'nth': 90,
'nph': 181,
'th0': 0,
'th1': np.pi,
'ph0': 0,
'ph1': 2*np.pi,
'azoffset': 0,
'inplace': True
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if 'fGHz' not in kwargs:
# case antenna has been measured
if hasattr(self,'_fGHz'):
self.fGHz=self._fGHz
elif 'fGHz' not in self.__dict__:
self.fGHz = np.array([2.4])
else:
if type(kwargs['fGHz'])==np.ndarray:
self.fGHz = kwargs['fGHz']
else:
self.fGHz = np.array([kwargs['fGHz']])
# if current antenna is a measured antenna, pass the request frequencies
# in particular if antenna pattern is type = nfc
if hasattr(self,'_fGHz'):
self.param.update({'fGHz':self.fGHz})
self.nf = len(self.fGHz)
#
# if th and ph are empty
# if pt and pr are empty
# calculates from th0,th1,nth
# ph0,phi,nph
# else
# calculates from points coordinates pt and pr
# else
# take specified values
#
if ('th' not in kwargs) and ('ph' not in kwargs):
if ('pt' not in kwargs) and ('pr' not in kwargs):
# grid = True
# Determine theta and phi from th0, th1, nth and ph0, ph1, nph
self.theta = np.linspace(kwargs['th0'],kwargs['th1'],kwargs['nth'])
self.phi = np.linspace(kwargs['ph0'],kwargs['ph1'],kwargs['nph'],endpoint=False)
self.grid = True
condth = np.abs((kwargs['th1']-kwargs['th0'])-np.pi)<1e-2
condph = np.abs((kwargs['ph1']-kwargs['ph0'])-2*np.pi)<1e-2
if (condth and condph):
self.full = True
else:
# Gain between 2 points (One or 2 directions (uf Rfloor)
# grid = False
si = kwargs['pr']-kwargs['pt']
ssi = np.sqrt(np.sum(si*si,axis=0))
sn = si/ssi[None,:]
self.theta = np.arccos(sn[2,:])
self.phi = np.mod(np.arctan2(sn[1,:],sn[0,:])+kwargs['azoffset'],2*np.pi)
self.grid = False
if kwargs['Rfloor']:
dR = np.sqrt(ssi**2 + (kwargs['pr'][2,:] + kwargs['pt'][2,:])**2) # reflexion length
thetaR = np.arccos((kwargs['pr'][2,:] + kwargs['pt'][2,:]) / dR)
self.theta = np.hstack([self.theta,thetaR])
self.phi = np.hstack([self.phi,self.phi])
else :
self.grid = False
self.full = False
assert(len(kwargs['th'])==len(kwargs['ph']))
self.theta = kwargs['th']
self.phi = kwargs['ph']
if self.typ=='azel':
self.theta=np.linspace(-np.pi,np.pi,360)
self.phi=np.linspace(-np.pi,np.pi,360)
self.nth = len(self.theta)
self.nph = len(self.phi)
#
# evaluation of the specific Pattern__p function
#
Ft,Fp = eval('self._Pattern__p'+self.typ)(param=self.param)
if kwargs['inplace']:
self.Ft = Ft
self.Fp = Fp
self.evaluated = True
self.gain()
else:
return Ft,Fp
def vsh(self,threshold=-1):
if self.evaluated:
vsh(self)
self.C.s1tos2()
self.C.s2tos3(threshold=threshold)
else:
print('antenna must be evaluated to be converted into spherical harmonics.')
def ssh(self,L=89,dsf=1):
if self.evaluated:
ssh(self,L,dsf)
def __pdipole(self,**kwargs):
""" dipole antenna along z axis
From Balanis (Formula 4.62(a))
.. math::
F_{\theta}(\theta,\phi) = \left[ \frac{\cos\left(\frac{kl}{2}\cos\theta\right)- \cos\left(\frac{kl}{2}\right)}{\sin \theta} \right]
"""
defaults = { 'param' : { 'l' : 0.25 } }
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
l = kwargs['param']['l']
if self.grid:
# Nth x Nphx Nf
k = 2*np.pi*self.fGHz[None,None,:]/0.3
usmall = self.theta<=1e-1
Ft = (np.cos((k*l/2)*np.ones(len(self.phi))[None,:,None]*np.cos(self.theta[:,None,None]))-np.cos(k*l/2))/np.sin(self.theta[:,None,None])
Ft[usmall,:,:] = -(k*l/4)*self.theta[usmall][:,None,None]*np.sin(k*l/2)
self.evaluated = True
else:
k = 2*np.pi*self.fGHz[None,:]/0.3
usmall = self.theta<=1e-1
Ft = (np.cos((k*l/2)*np.cos(self.theta[:,None]))-np.cos(k*l/2))/np.sin(self.theta[:,None])
Ft[usmall,:] = -(k*l/4)*self.theta[usmall][:,None]*np.sin(k*l/2)
# Nd x Nf
Fp = np.zeros(Ft.shape)
return Ft,Fp
def __pPatch(self,**kwargs):
""" Patch antenna
from Balanis (14-40b) page 835 (Rectangular Patch)
"""
defaults = { 'param' : { 'h':0.001588, 'W':0.01186, 'L':0.00906 } }
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
def __pOmni(self,**kwargs):
""" omnidirectional pattern
Parameters
----------
param : dict
dictionnary of parameters
+ pol : string
't'| 'p'
+ GmaxdB : float
0
self.grid is used for switching between :
if True angular grid : nth x nph x nf
if False direction : ndir x nf
"""
defaults = { 'param' : { 'pol' : 't', 'GmaxdB': 0 } }
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
self.GmaxdB = self.param['GmaxdB']
self.pol = self.param['pol']
G = pow(10.,self.GmaxdB/10.) # linear gain
if self.grid:
# Nth x Nphx Nf
self.sqG = np.array(np.sqrt(G))*np.ones(len(self.fGHz))[None,None,:]
self.evaluated = True
else:
# Nd x Nf
self.sqG = np.array(np.sqrt(G))*np.ones(len(self.fGHz))[None,:]
Ft,Fp = self.radF()
return Ft,Fp
def __paperture(self,**kwargs):
""" Aperture Pattern
Aperture in the (x,y) plane. Main lobe in theta=0 direction
polar indicates the orientation of the Electric field either 'x' or 'y'
See theoretical background in :
http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf
Parameters
----------
HPBW_x_deg : float
Half Power Beamwidth (degrees)
HPBW_y_deg : float
Half Power Beamwidth (degrees)
"""
defaults = {'param': {'HPBW_x_deg':40,
'HPBW_y_deg':10,
'Gfactor':27000,
'fcGHz': 27.5,
'polar':'x',
'window':'rect'
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
deg_to_rad = np.pi/180.
ld_c = 0.3/self.param['fcGHz']
ld = 0.3/self.fGHz
Dx = 0.886*ld_c/(self.param['HPBW_x_deg']*deg_to_rad)
Dy = 0.886*ld_c/(self.param['HPBW_y_deg']*deg_to_rad)
Dx_n = Dx/ld
Dy_n = Dy/ld
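# Added note: 0.886*lambda/D is the half-power beamwidth of a uniformly
# illuminated aperture (see the Orfanidis reference above), so Dx and Dy are
# the aperture sizes that reproduce the requested beamwidths at fcGHz.
# Rough check: fcGHz=27.5 (lambda_c ~ 10.9 mm) and HPBW_x_deg=40 (0.698 rad)
# give Dx ~ 0.886*0.0109/0.698 ~ 1.4 cm.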
if self.grid:
# Nth x Nph x Nf
theta = self.theta[:,None,None]
phi = self.phi[None,:,None]
else:
# Ndir x Nf
theta = self.theta[:,None]
phi = self.phi[:,None]
vx = Dx_n[...,:]*np.sin(theta)*np.cos(phi) # 18.1.4
vy = Dy_n[...,:]*np.sin(theta)*np.sin(phi) # 18.1.4
F_nor = ((1+np.cos(theta))/2.)*np.abs(np.sinc(vx)*np.sinc(vy))
HPBW_x = (0.886*ld/Dx)/deg_to_rad
HPBW_y = (0.886*ld/Dy)/deg_to_rad
Gmax = self.param['Gfactor']/(HPBW_x*HPBW_y)
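# Added note: this is the classic beamwidth-product rule of thumb
# G ~ Gfactor/(HPBW_az_deg*HPBW_el_deg); with the default Gfactor=27000 and
# 40 deg x 10 deg beamwidths it gives ~67.5, i.e. about 18.3 dBi.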
F = np.sqrt(Gmax[...,:])*F_nor # Ndir x Nf
# Handling repartition on both vector components
# enforce E.y = 0
if self.param['polar']=='x':
Ft = F/np.sqrt(1+(np.cos(theta)*np.sin(phi)/np.cos(phi))**2)
Fp = (-np.cos(theta)*np.sin(phi)/np.cos(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
# enforce E.x = 0
if self.param['polar']=='y':
Ft = F/np.sqrt(1+(np.cos(theta)*np.cos(phi)/np.sin(phi))**2)
Fp = (np.cos(theta)*np.cos(phi)/np.sin(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
# enforce E.x = 0
#
# This is experimental
# How to apply the 2D windowing properly ?
#
# if self.param['window']!='rect':
# Nt = self.Fp.shape[0]
# Np = self.Fp.shape[1]
# Wp = np.fft.ifftshift(np.hamming(Nt)[:,None]*np.ones(Np)[None,:])[:,:,None]
# Wt = np.fft.ifftshift(np.ones(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# Wu = np.fft.ifftshift(np.ones(Nt)[:,None]*np.ones(Np)[None,:])[:,:,None]
# Wi = np.fft.ifftshift(np.hamming(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# W = np.fft.fftshift(np.hamming(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# # Fp : t x p x f ou r x f
# # Ft : t x p x f ou r x f
#
# Kp = np.fft.ifft2(self.Fp,axes=(0,1))
# Kt = np.fft.ifft2(self.Ft,axes=(0,1))
#
# self.Fp = np.fft.fft2(Kp*Wt,axes=(0,1))
# self.Ft = np.fft.fft2(Kt*Wp,axes=(0,1))
return Ft,Fp
def __pnfc(self,**kwargs):
""" nfc Pattern
interpolation of Ft,Fp for measured antenna
Interpolation between known values of Ft and Fp contained in
self._Ft and self._Fp to a given set of theta, phi.
"""
defaults = {'param': {'fGHz':[]}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
# if self.grid:
# # Nth x Nph x Nf
# theta = self.theta[:,None,None]
# phi = self.phi[None,:,None]
# else:
uf=np.ndarray(shape=0,dtype=int)
for k in self.param['fGHz']:
uf = np.hstack((uf,np.where(self._fGHz<=k)[0][-1]))
theta = self.theta
phi = self.phi
# th0=np.array([0.12,3.1415,0.01])
# ph0=np.array([0.01,0.5,2])
# find closest theta arg : N
dth = self._theta[:,None]-theta
udth = abs(dth).argmin(axis=0)
# determine the sign of this arg to know whether N-1 or N+1 is the other candidate
sdth = np.sign(np.diag(dth[udth]))
# specific handling depending on whether the found argument is N-1 or N+1, i.e. self._theta-th > 0 or < 0
neg_mask = sdth<0
pos_mask = ~neg_mask
cudth= np.ndarray((len(theta)),dtype=int)
cudth[pos_mask]=udth[pos_mask]-1
cudth[neg_mask]=udth[neg_mask]
ratio_th = (theta-self._theta[cudth])/(self._theta[cudth+1]-self._theta[cudth])
# find closest phi arg : N
dph = self._phi[:,None]-phi
udph = abs(dph).argmin(axis=0)
# determine the sign of this arg to know whether N-1 or N+1 is the other candidate
sdph = np.sign(np.diag(dph[udph]))
# specific handling depending on whether the found argument is N-1 or N+1, i.e. self._phi-ph > 0 or < 0
neg_mask = sdph<0
pos_mask = ~neg_mask
cudph= np.ndarray((len(phi)),dtype=int)
cudph[pos_mask]=udph[pos_mask]-1
cudph[neg_mask]=udph[neg_mask]
ratio_ph = (phi-self._phi[cudph])/(self._phi[cudph+1]-self._phi[cudph])
if self.grid:
Ft=self._Ft[cudth,:,:][...,uf]*(1.-ratio_th[:,None,None])+ratio_th[:,None,None]*self._Ft[cudth+1,:,:][...,uf]
Ft=Ft[:,cudph,:]*(1.-ratio_ph[None,:,None])+ratio_ph[None,:,None]*Ft[:,cudph+1,:]
Fp=self._Fp[cudth,:,:][...,uf]*(1.-ratio_th[:,None,None])+ratio_th[:,None,None]*self._Fp[cudth+1,:,:][...,uf]
Fp=Fp[:,cudph,:]*(1.-ratio_ph[None,:,None])+ratio_ph[None,:,None]*Fp[:,cudph+1,:]
else:
Ft0=self._Ft[cudth,cudph,:][...,uf]*(1.-ratio_th[:,None])+ratio_th[:,None]*self._Ft[cudth+1,cudph,:][...,uf]
Ft1=self._Ft[cudth,cudph+1,:][...,uf]*(1.-ratio_th[:,None])+ratio_th[:,None]*self._Ft[cudth+1,cudph+1,:][...,uf]
Ft = Ft0*(1.-ratio_ph[:,None])+Ft1*ratio_ph[:,None]
Fp0=self._Fp[cudth,cudph,:][...,uf]*(1.-ratio_th[:,None])+ratio_th[:,None]*self._Fp[cudth+1,cudph,:][...,uf]
Fp1=self._Fp[cudth,cudph+1,:][...,uf]*(1.-ratio_th[:,None])+ratio_th[:,None]*self._Fp[cudth+1,cudph+1,:][...,uf]
Fp = Fp0*(1.-ratio_ph[:,None])+Fp1*ratio_ph[:,None]
return Ft,Fp
def __paperture2(self,**kwargs):
""" Aperture Pattern
Aperture in the (x,y) plane. Main lobe in theta=0 direction
polar indicates the orientation of the Electric field either 'x' or 'y'
See theoretical background in :
http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf
Parameters
----------
HPBW_x_deg : float
Half Power Beamwidth (degrees)
HPBW_y_deg : float
Half Power Beamwidth (degrees)
"""
defaults = {'param': {'HPBW_a_deg':40,
'HPBW_b_deg':10,
'Gfactor':27000,
'fcGHz': 27.5,
'polar':'x',
'window':'rect'
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
deg_to_rad = np.pi/180.
ld_c = 0.3/self.param['fcGHz']
ld = 0.3/self.fGHz
a = 1.189*ld_c/(self.param['HPBW_a_deg']*deg_to_rad)
b = 0.886*ld_c/(self.param['HPBW_b_deg']*deg_to_rad)
a_n = a/ld
b_n = b/ld
if self.grid:
# Nth x Nph x Nf
theta = self.theta[:,None,None]
phi = self.phi[None,:,None]
else:
# Ndir x Nf
theta = self.theta[:,None]
phi = self.phi[:,None]
vx = a_n[...,:]*np.sin(theta)*np.cos(phi) # 18.1.4
vy = b_n[...,:]*np.sin(theta)*np.sin(phi) # 18.1.4
#F_nor = ((1+np.cos(theta))/2.)*np.abs(np.sinc(vx)*np.sinc(vy))
F_nor = (1+np.cos(theta))/2*(np.cos(np.pi*vx)/(1-4*vx**2))*np.sinc(vy) # 18.1.3 + suppression rear radiation
HPBW_a = (1.189*ld/a)/deg_to_rad
HPBW_b = (0.886*ld/b)/deg_to_rad
Gmax = self.param['Gfactor']/(HPBW_a*HPBW_b)
F = np.sqrt(Gmax[...,:])*F_nor # Ndir x Nf
# Handling repartition on both vector components
# enforce E.y = 0
if self.param['polar']=='x':
Ft = F/np.sqrt(1+(np.cos(theta)*np.sin(phi)/np.cos(phi))**2)
Fp = (-np.cos(theta)*np.sin(phi)/np.cos(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
# enforce E.x = 0
if self.param['polar']=='y':
Ft = F/np.sqrt(1+(np.cos(theta)*np.cos(phi)/np.sin(phi))**2)
Fp = (np.cos(theta)*np.cos(phi)/np.sin(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
# enforce E.x = 0
#
# This is experimental
# How to apply the 2D windowing properly ?
#
# if self.param['window']!='rect':
# Nt = self.Fp.shape[0]
# Np = self.Fp.shape[1]
# Wp = np.fft.ifftshift(np.hamming(Nt)[:,None]*np.ones(Np)[None,:])[:,:,None]
# Wt = np.fft.ifftshift(np.ones(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# Wu = np.fft.ifftshift(np.ones(Nt)[:,None]*np.ones(Np)[None,:])[:,:,None]
# Wi = np.fft.ifftshift(np.hamming(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# W = np.fft.fftshift(np.hamming(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# # Fp : t x p x f ou r x f
# # Ft : t x p x f ou r x f
#
# Kp = np.fft.ifft2(self.Fp,axes=(0,1))
# Kt = np.fft.ifft2(self.Ft,axes=(0,1))
#
# self.Fp = np.fft.fft2(Kp*Wt,axes=(0,1))
# self.Ft = np.fft.fft2(Kt*Wp,axes=(0,1))
return Ft,Fp
def __phplanesectoralhorn(self,**kwargs):
""" H plane sectoral horn
Parameters
----------
rho1 : float
sector radius (meter)
a1 : float
aperture dimension along x (greatest value in meters)
b1 : float
aperture dimension along y (greatest value in meters)
Notes
-----
Maximum gain in theta =0
Polarized along y axis (Jx=0,Jz=0)
"""
defaults = {'param': {'rho1':0.198,
'a1':0.088, # aperture dimension along x
'b1':0.0126, # aperture dimension along y
'fcGHz':28,
'GcmaxdB':19,
'Nx':20,
'Ny':20}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
#H-plane antenna
rho1 = self.param['rho1']
a1 = self.param['a1']
b1 = self.param['b1']
Nx = self.param['Nx']
Ny = self.param['Ny']
fcGHz = self.param['fcGHz']
GcmaxdB = self.param['GcmaxdB']
assert(a1>b1), "a1 should be greater than b1 (see fig 13.10(a) Balanis)"
lbda = 0.3/self.fGHz
k = 2*np.pi/lbda
eta0 = np.sqrt(4*np.pi*1e-7/8.85429e-12)
if self.grid:
# X,Y aperture points (t,p,x,y,f)
X = np.arange(-a1/2,a1/2,a1/(Nx-1))[None,None,:,None,None]
Y = np.arange(-b1/2,b1/2,b1/(Ny-1))[None,None,None,:,None]
# angular domain (theta,phi)
Theta= self.theta[:,None,None,None,None]
Phi = self.phi[None,:,None,None,None]
else:
# X,Y aperture points (r,x,y,f)
X = np.arange(-a1/2,a1/2,a1/(Nx-1))[None,:,None,None]
Y = np.arange(-b1/2,b1/2,b1/(Ny-1))[None,None,:,None]
# angular domain (theta,phi)
Theta= self.theta[:,None,None,None]
Phi= self.phi[:,None,None,None]
#% Aperture field Ea:
# Ea is an approximation of the aperture field:
# (from: C. A. Balanis, Antenna Theory: Analysis and Design. New York
# Wiley, 1982. ... Section 13.3.1 )
Ea = np.cos(X*np.pi/a1)*np.exp(-.5*1j*k*((X**2)/(rho1)+(Y**2)/(rho1)))
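# Added note: Ea combines the TE10 cosine taper along x with a quadratic
# phase term exp(-j*k*(x**2+y**2)/(2*rho1)) modelling the spherical phase
# error across the horn aperture (Balanis, section 13.3.1).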
Jy = -Ea/eta0
Mx = Ea
# cosine direction
ctsp = np.cos(Theta)*np.sin(Phi)
cp = np.cos(Phi)
ctcp = np.cos(Theta)*np.cos(Phi)
sp = np.sin(Phi)
stcp = np.sin(Theta)*np.cos(Phi)
stsp = np.sin(Theta)*np.sin(Phi)
# N & L
ejkrrp = np.exp(1j*k*( X*stcp + Y*stsp)) # exp(jk (r.r'))
if self.grid:
N_theta = np.einsum('tpnmf->tpf',Jy*ctsp*ejkrrp) # 12-12 a assuming Jx,Jz=0
N_phi = np.einsum('tpnmf->tpf',Jy*cp*ejkrrp) # 12-12 b ""
L_theta = np.einsum('tpnmf->tpf',Mx*ctcp*ejkrrp) # 12-12 c assuming My,Mz=0
L_phi = np.einsum('tpnmf->tpf',-Mx*sp*ejkrrp) # 12-12 d ""
else:
N_theta = np.einsum('rnmf->rf',Jy*ctsp*ejkrrp) # 12-12 a assuming Jx,Jz=0
N_phi = np.einsum('rnmf->rf',Jy*cp*ejkrrp) # 12-12 b ""
L_theta = np.einsum('rnmf->rf',Mx*ctcp*ejkrrp) # 12-12 c assuming My,Mz=0
L_phi = np.einsum('rnmf->rf',-Mx*sp*ejkrrp) # 12-12 d ""
# Far-Field
Ft = -L_phi - eta0*N_theta # 12-10b p 661
Fp = L_theta - eta0*N_phi # 12-10c p 661
G = Ft*np.conj(Ft)+Fp*np.conj(Fp)
if self.grid:
# Umax : ,f
self.Umax = G.max(axis=(0,1))
Ft = Ft/np.sqrt(self.Umax[None,None,:])
Fp = Fp/np.sqrt(self.Umax[None,None,:])
# centered frequency range
fcc = np.abs(self.fGHz-fcGHz)
idxc = np.where(fcc==np.min(fcc))[0][0]
# Gain @ center frequency
#G = _gain(Ft[:,:,idxc],Fp[:,:,idxc])
G = _gain(Ft,Fp)
# effective half power beamwidth
self.ehpbw, self.hpster = _hpbw(G,self.theta,self.phi)
self.Gfactor = 10**(GcmaxdB/10.)*self.ehpbw[idxc]
Gmax = self.Gfactor/self.ehpbw
Ft = np.sqrt(Gmax[None,None,:])*Ft
Fp = np.sqrt(Gmax[None,None,:])*Fp
else:
##
## Ft (r x f )
## Fp (r x f )
##
Ft = Ft/np.sqrt(self.Umax[None,:])
Fp = Fp/np.sqrt(self.Umax[None,:])
Gmax = self.Gfactor/self.ehpbw
Ft = np.sqrt(Gmax[None,:])*Ft
Fp = np.sqrt(Gmax[None,:])*Fp
return Ft,Fp
def __patoll(self,**kwargs):
"""
"""
paramdef = {'iband':0,
'polar':-45.0,
'tilt':0
}
param = kwargs.pop('param')
if param =={}:
param = paramdef
iband = param.pop('iband')
polar = param.pop('polar')
tilt = param.pop('tilt')
# TODO check tilt value is compatible
lbands = list(dict(self.atoll).keys())
# Gver : 360,Nf
# Ghor : 360,Nf
Gver = self.atoll[lbands[iband]][polar]['ver'][:,tilt,:]
self.fGHz = self.atoll[lbands[iband]][polar]['freq']
self.tilt_theo = self.atoll[lbands[iband]][polar]['tilt'][tilt]
Ghor = self.atoll[lbands[iband]][polar]['hor'][:,tilt,:]
shG = Gver.shape
Nhor = Ghor.shape[0]
Nver = Gver.shape[0]
# grid mode (180,360,Nf)
rmax = int(Nver/2)
self.theta = np.linspace(0,np.pi,rmax+1)
self.phi = np.linspace(0,2*np.pi-2*np.pi/Nhor,Nhor)
#self.nth = len(self.theta)
#self.nph = len(self.phi)
#a1 = np.kron(self.theta,np.ones(len(self.phi)))
#a2 = np.kron(np.ones(len(self.theta)),self.phi)
#g = np.hstack((a1[:,None],a2[:,None]))
sqG = np.ones((181,360,shG[-1]))
uvermax = np.zeros(shG[-1]).astype(int)
for k in range(shG[-1]):
# find the maximum in the vertical plane
uvermax[k] = np.where(Gver[:,k]==np.max(Gver[:,k]))[0][0]
# offset of vertical pattern
Gver_roll = np.roll(Gver[:,k],-uvermax[k])
# first row (pole)
sqG[0,:,k] = np.sqrt(10**(Gver_roll[0]/10))
# last row (pole)
sqG[-1,:,k] = np.sqrt(10**(Gver_roll[rmax]/10))
# first column (Vertical)
c1 = 0
sqG[:,c1,k] = np.sqrt(10**(Gver_roll[0:rmax+1]/10))
# third column (Vertical)
c3 = 180
sqG[1:-1,c3,k] = np.sqrt(10**(Gver_roll[rmax+1:][::-1]/10))
# second column (Horizontal)
c2 = 90
sqG[:,c2,k] = np.sqrt(10**(Ghor[0:181,k]/10))
# fourth column (Horizontal)
c4 = 270
sqG[1:-1,c4,k] = np.sqrt(10**(Ghor[rmax+1:,k][::-1]/10))
u1 = np.linspace(1,89,89)/89.
#
# interpolation
#
sqG[1:-1,1:90,k] = sqG[1:-1,0,k][:,None]*(1-u1[None,:])+sqG[1:-1,90,k][:,None]*u1[None,:]
sqG[1:-1,91:180,k]= sqG[1:-1,90,k][:,None]*(1-u1[None,:])+sqG[1:-1,180,k][:,None]*u1[None,:]
sqG[1:-1,181:270,k] = sqG[1:-1,180,k][:,None]*(1-u1[None,:])+sqG[1:-1,270,k][:,None]*u1[None,:]
sqG[1:-1,271:,k]= sqG[1:-1,270,k][:,None]*(1-u1[None,:])+sqG[1:-1,0,k][:,None]*u1[None,:]
#plt.plot(sqG[:,0,:])
#plt.plot(sqG[:,180,:])
#plt.plot(sqG[:,90,:])
#plt.plot(sqG[:,270,:])
Ft = sqG/np.sqrt(2)
Fp = sqG/np.sqrt(2)
return Ft,Fp
def __phorn(self,**kwargs):
""" Horn antenna
http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf (18.2)
Parameters
----------
Half Power Beamwidth (degrees)
"""
defaults = {'param': {'sigma_a':1.2593,
'sigma_b':1.0246,
'A_wl':16,
'B_wl':3,
'fcGHz':28.,
'polar':'x'
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
deg_to_rad = np.pi/180.
ld_c = 0.3/self.param['fcGHz']
ld = 0.3/self.fGHz
A_wl = kwargs['param']['A_wl']
B_wl = kwargs['param']['B_wl']
A = A_wl*ld_c
B = B_wl*ld_c
sigma_a = kwargs['param']['sigma_a']
sigma_b = kwargs['param']['sigma_b']
#b = kwargs['param']['b']
#Ra = (A/(A-a))*RA
#Rb = (B/(B-b))*RB
#La = np.sqrt(Ra**2+A**2/4)
#Lb = np.sqrt(Rb**2+B**2/4)
#alpha = np.arctan(A/(2*Ra))
#beta = np.arctan(B/(2*Rb))
#Delta_a = A**2/(8*Ra)
#Delta_b = B**2/(8*Rb)
#sigma_a = A/np.sqrt((2*ld*Ra))
#sigma_b = B/np.sqrt((2*ld*Rb))
A_n = A/ld
B_n = B/ld
if self.grid:
# Nth x Nph x Nf
theta = self.theta[:,None,None]
phi = self.phi[None,:,None]
else:
# Ndir x Nf
theta = self.theta[:,None]
phi = self.phi[:,None]
vx = A_n[...,:]*np.sin(theta)*np.cos(phi) # 18.3.4
vy = B_n[...,:]*np.sin(theta)*np.sin(phi) # 18.3.4
F = ((1+np.cos(theta))/2.)*(F1(vx,sigma_a)*F0(vy,sigma_b))
normF = np.abs(F1(0,sigma_a)*F0(0,sigma_b))**2
F_nor = F/np.sqrt(normF)
efficiency = 0.125*normF # 18.4.3
Gmax = efficiency*4*np.pi*A*B/ld**2
F = np.sqrt(Gmax[...,:])*F_nor # Ndir x Nf
# Handling repartition on both vector components
# enforce E.y = 0
if self.param['polar']=='x':
Ft = F/np.sqrt(1+(np.cos(theta)*np.sin(phi)/np.cos(phi))**2)
Fp = (-np.cos(theta)*np.sin(phi)/np.cos(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
# enforce E.x = 0
if self.param['polar']=='y':
Ft = F/np.sqrt(1+(np.cos(theta)*np.cos(phi)/np.sin(phi))**2)
Fp = (np.cos(theta)*np.cos(phi)/np.sin(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
return Ft,Fp
def __pazel(self,**kwargs):
""" Azimuth elevation pattern from file
Parameters
----------
filename : ANT filename
Notes
-----
The 3D pattern is obtained by taking the product
of azimuth pattern and elevation pattern.
"""
defaults = {'param': {'filename' : '',
'pol':'V'}}
f = open(kwargs['param']['filename'])
Gthetaphi = f.readlines()
f.close()
Gthetaphi = np.array(Gthetaphi).astype('float')
Gaz = Gthetaphi[360:]
Gel = Gthetaphi[:360]
sqGazlin = np.sqrt(pow(10,Gaz/10.))
sqGellin = np.sqrt(pow(10,Gel/10.))
if self.grid :
# Nth x Nph x Nf
if kwargs['param']['pol']=='V':
Ft = np.ones((360,360,1))
Fp = np.zeros((360,360,1))
#Ft[180,:] = sqGazlin[:,None]
#Ft[:,180] = sqGellin[:,None]
Ft = sqGazlin[None,:,None]*sqGellin[:,None,None]
if kwargs['param']['pol']=='H':
Fp = np.ones((360,360,1))
Ft = np.zeros((360,360,1))
Fp = sqGazlin[None,:,None]*sqGellin[:,None,None]
#self.Fp[180,:]= sqGazlin[:,None]
#self.Fp[:,180]= sqGellin[:,None]
if kwargs['param']['pol']=='45':
Fp = np.ones((360,360,1))
Ft = np.ones((360,360,1))
# Azimuth
Ft = (1/np.sqrt(2))*sqGazlin[None,:,None]*sqGellin[:,None,None]
Fp = (1/np.sqrt(2))*sqGazlin[None,:,None]*sqGellin[:,None,None]
#self.Fp[180,:]= sqGazlin[:,None]
#self.Fp[180,:]= (1/sqrt(2))*sqGazlin[:,None]
#Ft[180,:]= (1/sqrt(2))*sqGazlin[:,None]
# Elevation
#self.Fp[:,180]= (1/sqrt(2))*sqGellin[:,None]
#Ft[:,180]= (1/sqrt(2))*sqGellin[:,None]
#Ft = sqGthlin[:,None,None]
#self.Fp = sqGphlin[None,:,None]
# Ft = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) )
# self.Fp = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) )
self.evaluated = True
else:
pass
# #
# # Nd x Nf
# #
# Ft = self.sqGmax * ( np.exp(-2.76*argth) * np.exp(-2.76*argphi) )
# Fp = self.sqGmax * ( np.exp(-2.76*argth) * np.exp(-2.76*argphi) )
# # add frequency axis (Ndir x Nf)
# Ft = np.dot(Ft[:,None],np.ones(len(self.fGHz))[None,:])
# self.Fp = np.dot(Fp[:,None],np.ones(len(self.fGHz))[None,:])
return Ft,Fp
def __pGauss(self,**kwargs):
""" Gauss pattern
Parameters
----------
p0 : phi main lobe (0-2pi)
p3 : 3dB aperture angle
t0 : theta main lobe (0-pi)
t3 : 3dB aperture angle
TODO : finish implementation of polar
"""
defaults = {'param':{'p0' : 0,
't0' : np.pi/2,
'p3' : np.pi/6,
't3' : np.pi/6,
'pol':'th'
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.typ='Gauss'
self.param = kwargs['param']
p0 = self.param['p0']
t0 = self.param['t0']
p3 = self.param['p3']
t3 = self.param['t3']
pol = self.param['pol']
self.Gmax = 16/(t3*p3)
self.GdB = 10*np.log10(self.Gmax)
self.sqGmax = np.sqrt(self.Gmax)
argth = ((self.theta-t0)**2)/t3
e1 = np.mod(self.phi-p0,2*np.pi)
e2 = np.mod(p0-self.phi,2*np.pi)
e = np.minimum(e1,e2)
argphi = (e**2)/p3
Nf = len(self.fGHz)
if self.grid :
Nt = len(self.theta)
Np = len(self.phi)
# Nth x Nph x Nf
# Ft = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) )
# self.Fp = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) )
if pol=='th':
Ft = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) *np.ones(len(self.fGHz))[None,None,:])
Fp = np.zeros((Nt,Np,Nf))
if pol=='ph':
Ft = np.zeros((Nt,Np,Nf))
Fp = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) *np.ones(len(self.fGHz))[None,None,:])
else:
#
# Nd x Nf
#
Nd = len(self.theta)
assert(len(self.phi)==Nd)
if pol=='th':
Ft = self.sqGmax * ( np.exp(-2.76*argth) * np.exp(-2.76*argphi) )
Fp = np.zeros(Nd)
if pol=='ph':
Ft = np.zeros(Nd)
Fp = self.sqGmax * ( np.exp(-2.76*argth) * np.exp(-2.76*argphi) )
# add frequency axis (Ndir x Nf)
Ft = np.dot(Ft[:,None],np.ones(len(self.fGHz))[None,:])
Fp = np.dot(Fp[:,None],np.ones(len(self.fGHz))[None,:])
return Ft,Fp
def __p3gpp(self,**kwargs):
""" 3GPP antenna pattern
Parameters
----------
thtilt : theta tilt antenna
hpbwv : half power beamwidth v
hpbwh : half power beamwidth h
sllv : side lobe level
fbrh : front back ratio
gm :
pol : h | v | c
if pattern
Ft nth x nphi x nf
Fp nth x nphi x nf
else
Ft ndir x nf (==nth, ==nph)
Fp ndir x nf (==nth, ==nph)
"""
defaults = {'param' : {'thtilt':0, # antenna tilt
'hpbwv' :6.2,# half power beamwidth v
'hpbwh' :65, # half power beamwidth h
'sllv': -18, # side lobe level
'fbrh': 30, # front back ratio
'gm': 18, #
'pol':'p' #t , p , c
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param'] = defaults['param']
#if 'param' not in kwargs:
#kwargs['param']=defaults['param']
self.typ = "3gpp"
self.param = kwargs['param']
thtilt = self.param['thtilt']
hpbwh = self.param['hpbwh']
hpbwv = self.param['hpbwv']
sllv = self.param['sllv']
fbrh = self.param['fbrh']
gm = self.param['gm']
pol = self.param['pol']
self.pol = pol
# convert radian to degree
phi = self.phi*180/np.pi-180
theta = self.theta*180/np.pi-90
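# Added note: after this mapping phi lies in [-180,180) deg and theta in
# [-90,90] deg, so the vertical mask below is centred on theta=thtilt
# (the horizon for thtilt=0) and the horizontal mask on phi=0 (boresight).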
if self.grid:
#Nth x Nph x Nf
GvdB = np.maximum(-12*((theta-thtilt)/hpbwv)**2,sllv)[:,None,None]
GhdB = (-np.minimum(12*(phi/hpbwh)**2,fbrh)+gm)[None,:,None]
GdB = GhdB+GvdB
self.sqG = np.sqrt(10**(GdB/10.))*np.ones(self.nf)[None,None,:]
self.evaluated = True
else:
#Nd x Nf
GvdB = np.maximum(-12*((theta-thtilt)/hpbwv)**2,sllv)[:,None]
GhdB = (-np.minimum(12*(phi/hpbwh)**2,fbrh)+gm)[:,None]
GdB = GhdB+GvdB
self.sqG = np.sqrt(10**(GdB/10.))
# radiating functions are deduced from square root of gain
Ft,Fp = self.radF()
return Ft,Fp
def __pvsh1(self,**kwargs):
""" calculate pattern from VSH Coeffs (shape 1)
Parameters
----------
theta : ndarray (1xNdir)
phi : ndarray (1xNdir)
k : int
frequency index
Returns
-------
Ft , Fp
"""
assert hasattr(self,'C'),'no spherical coefficient'
assert hasattr(self.C.Br,'s1'),'no shape 1 coeff in vsh'
if self.grid:
theta = np.kron(self.theta, np.ones(self.nph))
phi = np.kron(np.ones(self.nth),self.phi)
else:
theta = self.theta
phi = self.phi
Nt = len(theta)
Np = len(phi)
if self.grid:
theta = np.kron(theta, np.ones(Np))
phi = np.kron(np.ones(Nt),phi)
nray = len(theta)
Br = self.C.Br.s1[:, :, :]
Bi = self.C.Bi.s1[:, :, :]
Cr = self.C.Cr.s1[:, :, :]
Ci = self.C.Ci.s1[:, :, :]
L = self.C.Br.L1
M = self.C.Br.M1
# The - sign is necessary to get the correct reconstruction
# (deduced from observation).
# Maybe it comes from a different definition of theta in SPHEREPACK
ind = index_vsh(L, M)
l = ind[:, 0]
m = ind[:, 1]
#
V, W = VW(l, m, theta, phi)
#
# broadcasting along frequency axis
#
V = np.expand_dims(V,0)
W = np.expand_dims(W,0)
#
# k : frequency axis
# l : axis l (theta)
# m : axis m (phi)
#
# The following cannot work due to a shape issue:
# Fth = np.einsum('klm,kilm->ki',Br,np.real(V.T)) - \
# np.einsum('klm,kilm->ki',Bi,np.imag(V.T)) + \
# np.einsum('klm,kilm->ki',Ci,np.real(W.T)) + \
# np.einsum('klm,kilm->ki',Cr,np.imag(W.T))
# Fph = -np.einsum('klm,kilm->ki',Cr,np.real(V.T)) + \
# np.einsum('klm,kilm->ki',Ci,np.imag(V.T)) + \
# np.einsum('klm,kilm->ki',Bi,np.real(W.T)) + \
# np.einsum('klm,kilm->ki',Br,np.imag(W.T))
# this is replaced, without guarantee of correct
# broadcasting on frequency, by:
Brr = Br[:,l,m]
Bir = Bi[:,l,m]
Crr = Cr[:,l,m]
Cir = Ci[:,l,m]
Fth = np.dot(Brr, np.real(V.T)) - \
np.dot(Bir, np.imag(V.T)) + \
np.dot(Cir, np.real(W.T)) + \
np.dot(Crr, np.imag(W.T))
Fph = -np.dot(Crr, np.real(V.T)) + \
np.dot(Cir, np.imag(V.T)) + \
np.dot(Bir, np.real(W.T)) + \
np.dot(Brr, np.imag(W.T))
# here Nf x Nd
Ft = Fth.transpose()
Fp = Fph.transpose()
# then Nd x Nf
if self.grid:
# Nth x Nph x Nf
Ft = Ft.reshape(self.nth, self.nph,self.nf)
Fp = Fp.reshape(self.nth, self.nph,self.nf)
# last axis should be frequency
assert(Ft.shape[-1]==self.nf)
assert(Fp.shape[-1]==self.nf)
return Ft, Fp
def __pvsh3(self,**kwargs):
""" calculate pattern from vsh3
"""
assert hasattr(self,'C'),'no spherical coefficient'
assert hasattr(self.C.Br,'s3'),'no shape 3 coeff in vsh'
if self.grid:
theta = np.kron(self.theta, np.ones(self.nph))
phi = np.kron(np.ones(self.nth),self.phi)
else:
theta = self.theta
phi = self.phi
Br = self.C.Br.s3
lBr = self.C.Br.ind3[:, 0]
mBr = self.C.Br.ind3[:, 1]
Bi = self.C.Bi.s3
Cr = self.C.Cr.s3
Ci = self.C.Ci.s3
L = lBr.max()
M = mBr.max()
# vector spherical harmonics basis functions
# V, W = VW(lBr, mBr, theta, phi)
V, W = VW(lBr, mBr, theta, phi)
Fth = np.dot(Br, np.real(V.T)) - \
np.dot(Bi, np.imag(V.T)) + \
np.dot(Ci, np.real(W.T)) + \
np.dot(Cr, np.imag(W.T))
Fph = -np.dot(Cr, np.real(V.T)) + \
np.dot(Ci, np.imag(V.T)) + \
np.dot(Bi, np.real(W.T)) + \
np.dot(Br, np.imag(W.T))
# here Nf x Nd
Ft = Fth.transpose()
Fp = Fph.transpose()
# then Nd x Nf
if self.grid:
# Nth x Nph x Nf
Ft = Ft.reshape(self.nth, self.nph,self.nf)
Fp = Fp.reshape(self.nth, self.nph,self.nf)
# last axis should be frequency
assert(Ft.shape[-1]==self.nf)
assert(Fp.shape[-1]==self.nf)
return Ft,Fp
def __psh3(self,**kwargs):
""" calculate pattern for sh3
Parameters
----------
"""
assert hasattr(self,'S'),'no spherical coefficient'
assert hasattr(self.S.Cx,'s3'),'no shape 3 coeff in ssh'
if self.grid:
theta = np.kron(self.theta, np.ones(self.nph))
phi = np.kron(np.ones(self.nth),self.phi)
else:
theta = self.theta
phi = self.phi
cx = self.S.Cx.s3
cy = self.S.Cy.s3
cz = self.S.Cz.s3
lmax = self.S.Cx.lmax
Y ,indx = SSHFunc2(lmax, theta,phi)
k = self.S.Cx.k2
if self.grid:
Ex = np.dot(cx,Y[k])
Ey = np.dot(cy,Y[k])
Ez = np.dot(cz,Y[k])
Fth,Fph = CartToSphere(theta, phi, Ex, Ey,Ez, bfreq = True, pattern = True )
Ft = Fth.transpose()
Fp = Fph.transpose()
Ft = Ft.reshape(self.nth, self.nph,self.nf)
Fp = Fp.reshape(self.nth, self.nph,self.nf)
else:
Ex = np.dot(cx,Y[k])
Ey = np.dot(cy,Y[k])
Ez = np.dot(cz,Y[k])
Fth,Fph = CartToSphere(theta, phi, Ex, Ey,Ez, bfreq = True, pattern = False)
Ft = Fth.transpose()
Fp = Fph.transpose()
assert(Ft.shape[-1]==self.nf)
assert(Fp.shape[-1]==self.nf)
return Ft,Fp
def __pwireplate(self,**kwargs):
""" pattern wire plate antenna
"""
defaults = {'param':{'t0' : 5*np.pi/6,
'GmaxdB': 5
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.typ='wireplate'
self.param = kwargs['param']
t0 = self.param['t0']
GmaxdB = self.param['GmaxdB']
Gmax = pow(10.,GmaxdB/10.)
sqGmax = np.sqrt(Gmax)
uth1 = np.where(self.theta < t0)[0]
uth2 = np.where(self.theta >= t0)[0]
p = t0
q = np.pi/2.
A = np.array(([[3*p**2,2*p,1],[p**3,p**2,p],[q**3,q**2,q]]))
Y = np.array(([0,1,1/(1.*sqGmax)]))
self.poly = la.solve(A,Y)
argth1 = np.abs(self.poly[0]*self.theta[uth1]**3
+ self.poly[1]*self.theta[uth1]**2
+ self.poly[2]*self.theta[uth1])
argth2 = -(1/(np.pi-t0)**2)*(self.theta[uth2]-t0)**2+1
argth = np.hstack((argth1,argth2))[::-1]
if self.grid:
Ft = sqGmax * (argth[:,None])
Fp = sqGmax * (argth[:,None])
else:
Fat = sqGmax * argth
Fap = sqGmax * argth
Ft = np.dot(Fat[:,None],np.ones(len(self.fGHz))[None,:])
Fp = np.dot(Fap[:,None],np.ones(len(self.fGHz))[None,:])
return Ft,Fp
def __pcst(self,**kwargs):
""" read antenna in text format
"""
defaults = {'param':{'p' : 2,
'directory':'ant/FF_Results_txt_port_1_2/',
'fGHz':np.arange(2,6.5,0.5)}}
if 'param' not in kwargs or kwargs['param']=={}:
param=defaults['param']
else:
param=kwargs['param']
self.fGHz = param['fGHz']
self.nf = len(self.fGHz)
for f in param['fGHz']:
if ((int(f*10))%10)==0:
_filename1 = 'E_port'+str(param['p'])+'_f'+str(int(f))+'GHz.txt'
_filename2 = 'E_port'+str(param['p'])+'_f'+str(int(f))+'Ghz.txt'
# print 'toto'
else:
_filename1 = 'E_port'+str(param['p'])+'_f'+str(f)+'GHz.txt'
_filename2 = 'E_port'+str(param['p'])+'_f'+str(f)+'Ghz.txt'
filename1 = pyu.getlong(_filename1, param['directory'])
filename2 = pyu.getlong(_filename2, param['directory'])
try:
df = pd.read_csv(filename1,sep=';')
except:
df = pd.read_csv(filename2,sep=';')
columns = df.columns
theta = (df[columns[0]]*np.pi/180).values.reshape(72,37)
phi = (df[columns[1]]*np.pi/180).values.reshape(72,37)
modGrlzdB = df[columns[2]]
mFt = df[columns[3]]
pFt = df[columns[4]]
mFp = df[columns[5]]
pFp = df[columns[6]]
ratiodB = df[columns[7]]
Ft = (10**(mFt/20)*np.exp(1j*pFt*np.pi/180)).values.reshape(72,37)
Fp = (10**(mFp/20)*np.exp(1j*pFp*np.pi/180)).values.reshape(72,37)
Ft = Ft.swapaxes(0,1)
Fp = Fp.swapaxes(0,1)
try:
tFt=np.concatenate((tFt,Ft[...,None]),axis=2)
tFp=np.concatenate((tFp,Fp[...,None]),axis=2)
except:
tFt=Ft[...,None]
tFp=Fp[...,None]
self.phi = phi[:,0]
self.theta = theta[0,:]
self.nth = len(self.theta)
self.nph = len(self.phi)
Ft = tFt
Fp = tFp
return Ft,Fp
def __pHertz(self,**kwargs):
""" Hertz dipole
param = {'param':{'le':np.array([0,0,1])}}
le unit vector defining the dipole orientation
"""
defaults = {'param':{'le':np.array([0,0,1])}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
#k = 2*np.pi*self.fGHz[None,None,None,:]/0.3
param=kwargs['param']
if self.grid:
le = param['le'][:,None,None]
xr = np.sin(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
yr = np.sin(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
zr = np.cos(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
r = np.concatenate((xr,yr,zr),axis=0)
xp = -np.sin(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
yp = np.cos(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
zp = np.zeros(len(self.phi))[None,None,:]*np.ones(len(self.theta))[None,:,None]
ph = np.concatenate((xp,yp,zp),axis=0)
xt = np.cos(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
yt = np.cos(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
zt = -np.sin(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
th = np.concatenate((xt,yt,zt),axis=0)
vec = le - np.einsum('kij,kij->ij',le,r)[None,...]*r
#G = 1j*30*k*vec
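# Added note: the sqrt(3/2) factor normalises the pattern so that the peak
# gain equals 1.5, the directivity of an ideal Hertzian dipole
# (|vec| reaches 1 broadside to the current element le).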
Ft = np.sqrt(3/2.)*np.einsum('kij,kij->ij',vec,th)[...,None]
Fp = np.sqrt(3/2.)*np.einsum('kij,kij->ij',vec,ph)[...,None]
else:
le = param['le'][:,None]
xr = np.sin(self.theta)*np.cos(self.phi)
yr = np.sin(self.theta)*np.sin(self.phi)
zr = np.cos(self.theta)
r = np.concatenate((xr,yr,zr),axis=0)
xp = -np.sin(self.phi)
yp = np.cos(self.phi)
zp = np.zeros(len(self.phi))
ph = np.concatenate((xp,yp,zp),axis=0)
xt = np.cos(self.theta)*np.cos(self.phi)
yt = np.cos(self.theta)*np.sin(self.phi)
zt = -np.sin(self.theta)
th = np.concatenate((xt,yt,zt),axis=0)
vec = le - np.einsum('ki,ki->i',le,r)[None,...]*r
#G = 1j*30*k*vec
Ft = np.sqrt(3/2.)*np.einsum('ki,ki->i',vec,th)[...,None]
Fp = np.sqrt(3/2.)*np.einsum('ki,ki->i',vec,ph)[...,None]
return Ft,Fp
def __pHuygens(self,**kwargs):
""" Huygens source
param : dict
le : direction of electric current
n : normal to aperture
"""
defaults = {'param':{'le':np.array([0,0,1]),
'n':np.array([1,0,0])}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
#k = 2*np.pi*self.fGHz[None,None,None,:]/0.3
param=kwargs['param']
if self.grid:
le = param['le'][:,None,None]
n = param['n'][:,None,None]
xr = np.sin(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
yr = np.sin(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
zr = np.cos(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
r = np.concatenate((xr,yr,zr),axis=0)
xp = -np.sin(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
yp = np.cos(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
zp = np.zeros(len(self.phi))[None,None,:]*np.ones(len(self.theta))[None,:,None]
ph = np.concatenate((xp,yp,zp),axis=0)
xt = np.cos(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
yt = np.cos(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
zt = -np.sin(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
th = np.concatenate((xt,yt,zt),axis=0)
vec1 = le - np.einsum('kij,kij->ij',le,r)[None,...]*r
cro1 = np.cross(le,n,axisa=0,axisb=0,axisc=0)
vec2 = np.cross(cro1,r,axisa=0,axisb=0,axisc=0)
vec = vec1-vec2
#G = 1j*30*k*vec
Ft = np.sqrt(3/4.)*np.einsum('kij,kij->ij',vec,th)[...,None]
Fp = np.sqrt(3/4.)*np.einsum('kij,kij->ij',vec,ph)[...,None]
#Ft = np.einsum('kij,kij->ij',vec,th)[...,None]
#Fp = np.einsum('kij,kij->ij',vec,ph)[...,None]
else:
le = param['le'][:,None]
xr = np.sin(self.theta)*np.cos(self.phi)
yr = np.sin(self.theta)*np.sin(self.phi)
zr = np.cos(self.theta)
r = np.concatenate((xr,yr,zr),axis=0)
xp = -np.sin(self.phi)
yp = np.cos(self.phi)
zp = np.zeros(len(self.phi))
ph = np.concatenate((xp,yp,zp),axis=0)
xt = np.cos(self.theta)*np.cos(self.phi)
yt = np.cos(self.theta)*np.sin(self.phi)
zt = -np.sin(self.theta)
th = np.concatenate((xt,yt,zt),axis=0)
vec1 = le - np.einsum('ki,ki->i',le,r)[None,...]*r
cro1 = np.cross(le,n,axisa=0,axisb=0,axisc=0)
vec2 = np.cross(cro1,r,axisa=0,axisb=0,axisc=0)
vec = vec1-vec2
#G = 1j*30*k*vec
Ft = np.sqrt(3)*np.einsum('ki,ki->i',vec,th)[...,None]
Fp = np.sqrt(3)*np.einsum('ki,ki->i',vec,ph)[...,None]
return Ft,Fp
def __pArray(self,**kwargs):
""" Array factor
Parameters
----------
Sc : np.array
coupling S matrix
Notes
-----
Nd : Number of directions
Np : Number of points (antenna elements)
Nf : Number of frequencies
Nb : Number of beams
"""
defaults = {'param':{'Sc':[]}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
lamda = (0.3/self.fGHz)
k = 2*np.pi/lamda
if self.grid:
sx = np.sin(self.theta[:,None])*np.cos(self.phi[None,:]) # Ntheta x Nphi
sy = np.sin(self.theta[:,None])*np.sin(self.phi[None,:]) # Ntheta x Nphi
sz = np.cos(self.theta[:,None])*np.ones(len(self.phi))[None,:] # Ntheta x Nphi
sx = sx.reshape(self.nth*self.nph)
sy = sy.reshape(self.nth*self.nph)
sz = sz.reshape(self.nth*self.nph)
else:
sx = np.sin(self.theta)*np.cos(self.phi) # ,Nd
sy = np.sin(self.theta)*np.sin(self.phi) # ,Nd
sz = np.cos(self.theta) # ,Nd
self.s = np.vstack((sx,sy,sz)).T # Nd x 3
#
# F = exp(+jk s.p)
#
lshp = np.array(self.p.shape)
if len(lshp)>2:
Np = np.prod(lshp[1:])
p = self.p.reshape(3,Np)
else:
p = self.p
Np = p.shape[1]
self.Sc = self.param['Sc']
if self.Sc==[]:
# Sc : Np x Np x Nf
self.Sc = np.eye(Np)[...,None]
#Sc2 = np.random.rand(Np,Np)[...,None]
#pdb.set_trace()
#
# Get the weights
#
# w : b x a x f
lshw = np.array(self.w.shape)
if len(lshw)>2:
Np2 = np.prod(lshw[0:-1])
assert(Np2==Np)
w = self.w.reshape(Np,lshw[-1])
else:
w = self.w
# s : Nd x 3
# p : 3 x Np
#
# sdotp : Nd x Np
sdotp = np.dot(self.s,p) # s . p
for a in self.la:
if not self.grid:
a.eval(grid=self.grid,ph=self.phi,th=self.theta)
else:
a.eval(grid=self.grid)
# aFt : Nt x Np x Nf |Nd x Nf
# aFp : Nt x Np x Nf |Nd x Nf
aFt = a.Ft
aFp = a.Fp
#
# Force conversion to Nd x Nf
#
shF = aFt.shape
aFt = aFt.reshape(np.prod(shF[0:-1]),shF[-1])
aFp = aFp.reshape(np.prod(shF[0:-1]),shF[-1])
#
# Same pattern on each point
#
aFt = aFt[:,None,:]
aFp = aFp[:,None,:]
#
# Nf : frequency
# Nd : direction
# Np : points or array antenna element position
# Nb : number of beams
#
# w : Np x Nf
# Sc : Np x Np x Nf
#
#
# w' = w.Sc Np x Nf
#
# Coupling is implemented here
# Rules : The repeated index k is the common dimension of the product
# w : Np(k) x Nf(i)
# Sc : Np(k) x Np(m) x Nf(i)
# wp : Np(m) x Nf(i)
wp = np.einsum('ki,kmi->mi',w,self.Sc)
# add direction axis (=0) in w
#if len(.w.shape)==3:
# self.wp = self.wp[None,:,:,:]
# aFT : Nd x Np x Nf
# E : Nd x Np x Nf
E = np.exp(1j*k[None,None,:]*sdotp[:,:,None])
#
# wp : Np x Nf
# Fp : Nd x Np x Nf
# Ft : Nd x Np x Nf
#
Ft = wp[None,...]*aFt*E
Fp = wp[None,...]*aFp*E
if self.grid:
#
# Integrate over the Np points (axis =1)
# only if self.grid
# Fp : Nd x Nf
# Ft : Nd x Nf
#
Ft = np.sum(Ft,axis=1)
Fp = np.sum(Fp,axis=1)
sh = Ft.shape
Ft = Ft.reshape(self.nth,self.nph,sh[1])
Fp = Fp.reshape(self.nth,self.nph,sh[1])
return Ft,Fp
def radF(self):
""" evaluate radiation fonction w.r.t polarization
self.pol : 't' : theta , 'p' : phi n, 'c' : circular
"""
assert self.pol in ['t','p','c']
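# Added note: for 'c' (circular) the gain is split equally between the theta
# and phi components with a 90 degree phase offset, so |Ft|^2+|Fp|^2 = G.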
if self.pol=='p':
Fp = self.sqG
Ft = np.zeros(Fp.shape)
#if len(self.sqG.shape)==3:
# Ft = np.array([0])*np.ones(len(self.fGHz))[None,None,:]
#else:
# Ft = np.array([0])*np.ones(len(self.fGHz))[None,:]
if self.pol=='t':
#if len(self.sqG.shape)==3:
# Fp = np.array([0])*np.ones(len(self.fGHz))[None,None,:]
#else:
# Fp = np.array([0])*np.ones(len(self.fGHz))[None,:]
Ft = self.sqG
Fp = np.zeros(Ft.shape)
if self.pol=='c':
Fp = (1./np.sqrt(2))*self.sqG
Ft = (1j/np.sqrt(2))*self.sqG
return Ft,Fp
def gain(self):
""" calculates antenna gain
Notes
-----
This function updates the following attributes
+ self.G : np.array(Nt,Np,Nf) dtype:float
linear gain
or np.array(Nr,Nf)
+ self.sqG : np.array(Nt,Np,Nf) dtype:float
linear square root of gain
or np.array(Nr,Nf)
+ self.efficiency : np.array (,Nf) dtype:float
efficiency
+ self.hpster : np.array (,Nf) dtype:float
half power solid angle : 1 ~ 4pi steradian
+ self.ehpbw : np.array (,Nf) dtyp:float
equivalent half power beamwidth (radians)
.. math:: G(\\theta,\\phi) = |F_{\\theta}|^2 + |F_{\\phi}|^2
"""
self.G = np.real( self.Fp * np.conj(self.Fp)
+ self.Ft * np.conj(self.Ft))
if self.grid:
dt = self.theta[1]-self.theta[0]
dp = self.phi[1]-self.phi[0]
Nt = len(self.theta)
Np = len(self.phi)
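# Added note: the efficiency below is the discrete estimate of
# (1/4pi) * integral of G(theta,phi)*sin(theta) dtheta dphi over the sphere,
# which equals 1 for a lossless, correctly normalised pattern.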
Gs = self.G * np.sin(self.theta)[:, None, None] * np.ones(Np)[None, :, None]
self.efficiency = np.sum(np.sum(Gs,axis=0),axis=0)*dt*dp/(4*np.pi)
self.sqG = np.sqrt(self.G)
self.GdB = 10*np.log10(self.G)
# GdBmax (,Nf)
# Get direction of Gmax and get the polarisation state in that direction
self.GdBmax = np.max(np.max(self.GdB,axis=0),axis=0)
#
# The GdB maximum is determined over all the frequencies
#
GdBmax = np.max(self.GdBmax)
#self.umax = np.array(np.where(self.GdB==self.GdBmax))[:,0]
self.umax = np.array(np.where(self.GdB==GdBmax))[:,0]
self.theta_max = self.theta[self.umax[0]]
self.phi_max = self.phi[self.umax[1]]
M = geu.SphericalBasis(np.array([[self.theta_max,self.phi_max]]))
self.sl = M[:,2].squeeze()
uth = M[:,0]
uph = M[:,1]
el = self.Ft[tuple(self.umax)]*uth + self.Fp[tuple(self.umax)]*uph
eln = el/np.linalg.norm(el)
self.el = eln.squeeze()
self.hl = np.cross(self.sl,self.el)
#assert((self.efficiency<1.0).all()),pdb.set_trace()
self.hpster=np.zeros(len(self.fGHz))
self.ehpbw=np.zeros(len(self.fGHz))
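# Added note: for each frequency, hpster is the fraction of the sphere where
# the gain lies within 3 dB of its maximum; ehpbw converts that solid-angle
# fraction into the half-angle of an equivalent spherical cap, using
# Omega = 2*pi*(1-cos(theta_c)) => theta_c = arccos(1-2*hpster).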
for k in range(len(self.fGHz)):
U = np.zeros((Nt,Np))
A = self.GdB[:,:,k]*np.ones(Nt)[:,None]*np.ones(Np)[None,:]
u = np.where(A>(self.GdBmax[k]-3))
U[u] = 1
V = U*np.sin(self.theta)[:,None]
self.hpster[k] = np.sum(V)*dt*dp/(4*np.pi)
self.ehpbw[k] = np.arccos(1-2*self.hpster[k])
else:
self.sqG = np.sqrt(self.G)
self.GdB = 10*np.log10(self.G)
def plotG(self,**kwargs):
""" antenna plot gain in 2D
Parameters
----------
fGHz : frequency
plan : 'theta' | 'phi' depending on the selected plan to be displayed
angdeg : phi or theta in degrees, if plan=='phi' it corresponds to theta
GmaxdB : max gain to be displayed (20)
polar : boolean
dyn : 8 ,
legend : True,
polar : boolean
linear or polar representation
topos : False,
source :satimo,
show : True,
mode : string
'index' |
color: string
'black'
Returns
-------
fig
ax
Notes
-----
self.nth and self.nph have to be correctly set
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> from pylayers.antprop.antenna import *
>>> A = Antenna('defant.vsh3')
>>> fig,ax = A.plotG(fGHz=[2,3,4],plan='theta',angdeg=0)
>>> fig,ax = A.plotG(fGHz=[2,3,4],plan='phi',angdeg=90)
"""
if not self.evaluated:
self.eval(pattern=True)
dtr = np.pi/180.
defaults = {'fGHz' : [],
'dyn' : 8 ,
'plan': 'phi',
'angdeg' : 90,
'legend':True,
'GmaxdB':20,
'polar':True,
'topos':False,
'source':'satimo',
'show':True,
'mode':'index',
'color':'black',
'u':0,
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
args = {}
for k in kwargs:
if k not in defaults:
args[k] = kwargs[k]
if 'fig' not in kwargs:
fig = plt.figure(figsize=(8, 8))
else:
fig = kwargs['fig']
if 'ax' not in kwargs:
#ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True, facecolor='#d5de9c')
if kwargs['polar']:
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True )
else:
ax = fig.add_subplot(111)
else:
ax = kwargs['ax']
u = kwargs['u']
rc('grid', color='#316931', linewidth=1, linestyle='-')
rc('xtick', labelsize=15)
rc('ytick', labelsize=15)
DyndB = kwargs['dyn'] * 5
GmindB = kwargs['GmaxdB'] - DyndB
#print "DyndB",DyndB
#print "GmindB",GmindB
# force square figure and square axes looks better for polar, IMO
t1 = np.arange(5, DyndB + 5, 5)
t2 = np.arange(GmindB + 5, kwargs['GmaxdB'] + 5, 5)
col = ['k', 'r', 'g', 'b', 'm', 'c', 'y']
cpt = 0
#if len(self.fGHz) > 1 :
# fstep = self.fGHz[1]-self.fGHz[0]
#else :
# fstep = np.array((abs(self.fGHz-kwargs['fGHz'][0])+1))
#dtheta = self.theta[1,0]-self.theta[0,0]
#dphi = self.phi[0,1]-self.phi[0,0]
dtheta = self.theta[1]-self.theta[0]
dphi = self.phi[1]-self.phi[0]
if kwargs['fGHz']==[]:
lfreq = [self.fGHz[0]]
else:
lfreq = kwargs['fGHz']
for f in lfreq:
df = abs(self.fGHz-f)
ik0 = np.where(df==min(df))
ik = ik0[0][0]
#ik=0
chaine = 'f = %3.2f GHz' %(self.fGHz[ik])
# all theta
if kwargs['plan']=='theta':
itheta = np.arange(self.nth)
iphi1 = np.where(abs(self.phi-kwargs['angdeg']*dtr)<dphi)[0][0]
Np = self.nph
# 0 <= theta <= pi/2
u1 = np.where((self.theta <= np.pi / 2.) & (self.theta >= 0))[0]
# 0 < theta < pi
u2 = np.arange(self.nth)
# pi/2 < theta <= pi
u3 = np.nonzero((self.theta <= np.pi) & ( self.theta > np.pi / 2))[0]
#
# handle broadcasted axis =1 --> index 0
shsqG = self.sqG.shape
if shsqG[0]==1:
u1 = 0
u2 = 0
u3 = 0
if shsqG[1]==1:
iphi1 = 0
iphi2 = 0
if len(shsqG)==3: # if only one frequency point
if shsqG[2]==1:
ik = 0
else:
if shsqG[3]==1:
ik = 0
# handle parity
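# iphi2 is the azimuth cut diametrically opposite to iphi1 (phi + 180 deg),
# so that the two half-planes can be stitched into a full theta cut below.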
if np.mod(Np, 2) == 0:
iphi2 = np.mod(iphi1 + int(Np / 2), Np)
else:
iphi2 = np.mod(iphi1 + int((Np - 1) / 2), Np)
if len(shsqG)==3:
arg1 = (u1,iphi1,ik)
arg2 = (u2,iphi2,ik)
arg3 = (u3,iphi1,ik)
else:
if shsqG[3]==1:
u = 0
arg1 = (u1,iphi1,u,ik)
arg2 = (u2,iphi2,u,ik)
arg3 = (u3,iphi1,u,ik)
# polar diagram
#pdb.set_trace()
if kwargs['polar']:
if kwargs['source']=='satimo':
r1 = -GmindB + 20 * np.log10( self.sqG[arg1]+1e-12)
r2 = -GmindB + 20 * np.log10( self.sqG[arg2]+1e-12)
r3 = -GmindB + 20 * np.log10( self.sqG[arg3]+1e-12)
#print max(r1)+GmindB
#print max(r2)+GmindB
#print max(r3)+GmindB
if kwargs['source']=='cst':
r1 = -GmindB + 20 * np.log10( self.sqG[arg1]/np.sqrt(30)+1e-12)
r2 = -GmindB + 20 * np.log10( self.sqG[arg2]/np.sqrt(30)+1e-12)
r3 = -GmindB + 20 * np.log10( self.sqG[arg3]/np.sqrt(30)+1e-12)
if type(r1)!= np.ndarray:
r1 = np.array([r1])*np.ones(len(self.phi))
if type(r2)!= np.ndarray:
r2 = np.array([r2])*np.ones(len(self.phi))
if type(r3)!= np.ndarray:
r3 = np.array([r3])*np.ones(len(self.phi))
negr1 = np.nonzero(r1 < 0)
negr2 = np.nonzero(r2 < 0)
negr3 = np.nonzero(r3 < 0)
r1[negr1[0]] = 0
r2[negr2[0]] = 0
r3[negr3[0]] = 0
r = np.hstack((r1[::-1], r2, r3[::-1], r1[-1]))
a1 = np.arange(0, 360, 30)
a2 = [90, 60, 30, 0, 330, 300, 270, 240, 210, 180, 150, 120]
rline2, rtext2 = plt.thetagrids(a1, a2)
# linear diagram
else:
r1 = 20 * np.log10( self.sqG[arg1]+1e-12)
r2 = 20 * np.log10( self.sqG[arg2]+1e-12)
r3 = 20 * np.log10( self.sqG[arg3]+1e-12)
r = np.hstack((r1[::-1], r2, r3[::-1], r1[-1]))
# angular basis for phi
angle = np.linspace(0, 2 * np.pi, len(r), endpoint=True)
plt.title(u'$\\theta$ plane')
if kwargs['plan']=='phi':
iphi = np.arange(self.nph)
itheta = np.where(abs(self.theta-kwargs['angdeg']*dtr)<dtheta)[0][0]
angle = self.phi[iphi]
if len(self.sqG.shape)==3:
arg = [itheta,iphi,ik]
else:
arg = [itheta,iphi,u,ik]
if kwargs['polar']:
if np.prod(self.sqG.shape)!=1:
r = -GmindB + 20 * np.log10(self.sqG[arg])
neg = np.nonzero(r < 0)
r[neg] = 0
else:
r = -GmindB+ 20*np.log10(self.sqG[0,0,0]*np.ones(np.shape(angle)))
# plt.title(u'H plane - $\phi$ degrees')
a1 = np.arange(0, 360, 30)
a2 = [0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330]
#rline2, rtext2 = plt.thetagrids(a1, a2)
else:
r = 20 * np.log10(self.sqG[arg])
plt.title(u'$\\phi$ plane ')
# actual plotting
if len(lfreq)>1:
ax.plot(angle, r, color=col[cpt], lw=2, label=chaine)
else:
ax.plot(angle, r, color=kwargs['color'], lw=2, label=chaine)
cpt = cpt + 1
if kwargs['polar']:
rline1, rtext1 = plt.rgrids(t1, t2)
#ax.set_rmax(t1[-1])
#ax.set_rmin(t1[0])
if kwargs['legend']:
ax.legend()
if kwargs['show']:
plt.ion()
plt.show()
return(fig,ax)
class Antenna(Pattern):
""" Antenna
Attributes
----------
name : Antenna name
nf : number of frequency
nth : number of theta
nph : number of phi
Ft : Normalized Ftheta (ntheta,nphi,nf)
Fp : Normalized Fphi (ntheta,nphi,nf)
sqG : square root of gain (ntheta,nphi,nf)
theta : theta base 1 x ntheta
phi : phi base 1 x phi
C : VSH Coefficients
Methods
-------
info : Display information about antenna
vsh : calculates Vector Spherical Harmonics
show3 : Geomview diagram
plot3d : 3D diagram plotting using matplotlib toolkit
Antenna trx files can be stored in various orders
natural : HFSS
nfc : near field chamber
It is important when initializing an antenna object
to be aware of the type of trx file
.trx (ASCII Vectorial antenna Pattern)
F Phi Theta Fphi Ftheta
"""
def __init__(self,typ='Omni',**kwargs):
""" class constructor
Parameters
----------
typ : 'Omni','Gauss','WirePlate','3GPP','atoll'
_filename : string
antenna file name
directory : str
antenna subdirectory of the current project
the file is sought in the $BASENAME/ant directory
nf : integer
number of frequency
ntheta : integer
number of theta (default 181)
nphi : integer
number of phi (default 90)
source : string
source of data { 'satimo' | 'cst' | 'hfss' }
Notes
-----
The supported data formats for storing antenna patterns are
'mat': Matlab File
'vsh2': unthresholded vector spherical coefficients
'vsh3': thresholded vector spherical coefficients
'atoll': Atoll antenna file format
'trx' : Satimo NFC raw data
'trx1' : Satimo NFC raw data (deprecated)
A = Antenna('my_antenna.mat')
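Other typical constructions (a hedged sketch, assuming the corresponding
antenna files / types are available in the current project) :
A = Antenna('Omni', fGHz=np.array([2.4]))
A = Antenna('defant.vsh3')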
"""
defaults = {'directory': 'ant',
'source':'satimo',
'ntheta':90,
'nphi':181,
'L':90, # L max
'param':{},
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if 'fGHz' in kwargs:
if type(kwargs['fGHz'])==np.ndarray:
self.fGHz=kwargs['fGHz']
else:
self.fGHz=np.array([kwargs['fGHz']])
#mayavi selection
self._is_selected=False
self.source = kwargs['source']
self.param = kwargs['param']
# super(Antenna,self).__init__()
#Pattern.__init__(self)
#
# if typ string has an extension it is a file
#
if isinstance(typ,str):
AntennaName,Extension = os.path.splitext(typ)
self.ext = Extension[1:]
if self.ext=='':
self.fromfile = False
else:
self.fromfile = True
else:
self.fromfile = True
self.tau = 0
self.evaluated = False
#determine if pattern for all theta/phi is constructed
if self.fromfile:
if isinstance(typ,str):
self._filename = typ
if self.ext == 'vsh3':
self.typ='vsh3'
self.loadvsh3()
if self.ext == 'vsh2':
self.typ='vsh2'
self.loadvsh2()
if self.ext == 'sh3':
self.typ='sh3'
self.loadsh3()
if self.ext == 'sh2':
self.typ='sh2'
self.loadsh2()
if self.ext == 'trx1':
self.typ='trx'
self.load_trx(kwargs['directory'],self.nf,self.nth,self.nph)
if self.ext == 'trx':
self.typ='trx'
self.loadtrx(kwargs['directory'],kwargs['param'])
if self.ext == 'mat':
self.typ='mat'
self.loadmat(kwargs['directory'])
if self.ext == 'cst':
self.typ='cst'
if self.ext == 'txt':
self.typ='atoll'
self.load_atoll(kwargs['directory'])
elif isinstance(typ,list):
self._filename = typ
self.ext='hfss'
self.loadhfss(typ, self.nth, self.nph)
else: # not from file
self.typ = typ
self._filename = typ
if self.typ=='vsh3':
self.initvsh()
else:
self.eval()
def __repr__(self):
st = ''
st = st + 'type : ' + self.typ +'\n'
st = st+'------------------------\n'
if 'param' in self.__dict__:
for k in self.param:
st = st + ' ' + k + ' : ' + str(self.param[k])+'\n'
if hasattr(self,'atoll'):
for k1 in list(dict(self.atoll).keys()):
st = st + str(k1)+'\n'
for k2 in self.atoll[k1]:
st = st + ' '+ str(k2)+'\n'
st = st+'------------------------\n'
rtd = 180./np.pi
if self.fromfile:
if isinstance(self._filename,str):
st = st + 'file name : ' + self._filename+'\n'
else:
for i in range(len(self._filename)):
st = st + 'FileName : ' + self._filename[i]+'\n'
# #st = st + 'file type : ' + self.typ+'\n'
if 'fGHz' in self.__dict__:
st = st + "fmin : %4.2f" % (self.fGHz[0]) + "GHz\n"
st = st + "fmax : %4.2f" % (self.fGHz[-1]) + "GHz\n"
try:
st = st + "step : %4.2f" % (1000*(self.fGHz[1]-self.fGHz[0])) + "MHz\n"
except:
st = st + "step : None\n"
st = st + "Nf : %d" % (len(self.fGHz)) +"\n"
#
#
if hasattr(self,'C'):
st = st + self.C.__repr__()
if hasattr(self,'S'):
st = st + self.S.__repr__()
if self.evaluated:
st = st + '-----------------------\n'
st = st + ' evaluated \n'
st = st + '-----------------------\n'
st = st + "Ntheta : %d" % (self.nth) + "\n"
st = st + "Nphi : %d" % (self.nph) + "\n"
u = np.where(self.sqG==self.sqG.max())
if self.grid:
if len(u[0])>1:
S = self.sqG[(u[0][0],u[1][0],u[2][0])]
ut = u[0][0]
up = u[1][0]
uf = u[2][0]
else:
S = self.sqG[u]
ut = u[0]
up = u[1]
uf = u[2]
else:
if len(u[0])>1:
S = self.sqG[(u[0][0],u[1][0])]
ud = u[0][0]
uf = u[1][0]
else:
S = self.sqG[u]
ud = u[0]
uf = u[1]
st = st + "GdBmax :"+str(self.GdBmax[0])+' '+str(self.GdBmax[-1])+'\n'
st = st + "Gmax direction : .sl" + str(self.sl)+'\n'
st = st + "Orientation of E field in Gmax direction : .el " + str(self.el)+'\n'
st = st + "Orientation of H field in Gmax direction : .hl " + str(self.hl)+'\n'
st = st + "effective HPBW : .ehpbw " + str(self.ehpbw[0])+' '+str(self.ehpbw[-1])+'\n'
if self.source=='satimo':
GdB = 20*np.log10(S)
# see WHERE1 D4.1 sec 3.1.1.2.2
if self.source=='cst':
GdB = 20*np.log10(S/np.sqrt(30))
#st = st + "GmaxdB : %4.2f dB \n" % (GdB)
st = st + " f = %4.2f GHz \n" % (self.fGHz[uf])
if self.grid:
st = st + " theta = %4.2f (degrees) \n" % (self.theta[ut]*rtd)
st = st + " phi = %4.2f (degrees) \n" % (self.phi[up]*rtd)
else:
st = st + " Ray n :" + str(ud)+' \n'
else:
st = st + 'Not evaluated\n'
#
#
# if self.typ == 'mat':
# #st = st + self.DataFile + '\n'
# st = st + 'antenna name : '+ self.AntennaName + '\n'
# st = st + 'date : ' + self.Date +'\n'
# st = st + 'time : ' + self.StartTime +'\n'
# st = st + 'Notes : ' + self.Notes+'\n'
# st = st + 'Serie : ' + str(self.Serie)+'\n'
# st = st + 'Run : ' + str(self.Run)+'\n'
# st = st + "Nb theta (lat) : "+ str(self.nth)+'\n'
# st = st + "Nb phi (lon) :"+ str(self.nph)+'\n'
#
# if self.typ == 'Gauss':
# st = st + 'Gaussian pattern' + '\n'
# st = st + 'phi0 : ' + str(self.p0) +'\n'
# st = st + 'theta0 :' + str(self.t0) + '\n'
# st = st + 'phi 3dB :' + str(self.p3) + '\n'
# st = st + 'theta 3dB :' + str(self.t3) + '\n'
# st = st + 'Gain dB :' + str(self.GdB) + '\n'
# st = st + 'Gain linear :' + str(self.G ) + '\n'
# st = st + 'sqrt G :' + str(self.sqG) + '\n'
return(st)
def initvsh(self,lmax=45):
""" Initialize a void vsh structure
Parameters
----------
fGHz : array
lmax : int
level max
"""
nf = len(self.fGHz)
Br = 1j * np.zeros((nf, lmax, lmax-1))
Bi = 1j * np.zeros((nf, lmax, lmax-1))
Cr = 1j * np.zeros((nf, lmax, lmax-1))
Ci = 1j * np.zeros((nf, lmax, lmax-1))
Br = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Br)
Bi = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Bi)
Cr = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Cr)
Ci = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Ci)
self.C = VSHCoeff(Br, Bi, Cr, Ci)
def ls(self, typ='vsh3'):
""" list the antenna files in antenna project directory
Parameters
----------
typ : string optional
{'mat'|'trx'|'vsh2'|'sh2'|'vsh3'|'sh3'}
Returns
-------
lfile_s : list
sorted list of the antenna files of the requested type found in the project antenna directory
"""
if typ=='vsh3':
pathname = pstruc['DIRANT'] + '/*.' + typ
if typ=='sh3':
pathname = pstruc['DIRANT'] + '/*.' + typ
if typ=='mat':
pathname = pstruc['DIRANT'] + '/*.' + typ
if typ=='trx':
pathname = pstruc['DIRANT'] + '/*.' + typ
lfile_l = glob.glob(basename+'/'+pathname)
lfile_s = []
for fi in lfile_l:
fis = pyu.getshort(fi)
lfile_s.append(fis)
lfile_s.sort()
return lfile_s
def photo(self,directory=''):
""" show a picture of the antenna
Parameters
----------
directory : string
"""
if directory == '':
directory = os.path.join('ant','UWBAN','PhotosVideos')
_filename = 'IMG_'+self.PhotoFile.split('-')[1]+'.JPG'
filename = pyu.getlong(_filename,directory)
if sys.version_info.major==2:
I = Image.open(filename)
else:
I = image.open(filename)
I.show()
def load_atoll(self,directory="ant"):
""" load antenna from Atoll file
Atoll format provides Antenna gain in the horizontal and vertical plane
for different frequencies and different tilt values
Parameters
----------
directory : string
Notes
-----
atoll dictionary is created
atoll[keyband][polar]['hor'] = Ghor.reshape(360,ct,cf)
atoll[keyband][polar]['ver'] = Gver.reshape(360,ct,cf)
atoll[keyband][polar]['tilt'] = np.array(tilt)
atoll[keyband][polar]['freq'] = np.array(freq)
"""
_filemat = self._filename
fileatoll = pyu.getlong(_filemat, directory)
fd = open(fileatoll)
lis = fd.readlines()
tab = []
for li in lis:
lispl= li.split('\t')
if (lispl[0]!=''):
tab.append(lispl)
deg_to_rad = np.pi/180.
lbs_to_kg = 0.45359237
columns = tab[0]
#pdb.set_trace()
for k in np.arange(len(tab)-1):
df = pd.DataFrame([tab[k+1]],columns=columns)
try:
dff=dff.append(df)
except:
dff= df
self.raw = dff
dff = dff.iloc[:,[0,8,9,10,2,5,7,14,11,16,17,13,6,12]]
#dff = df['Name','Gain (dBi)','FMin','FMax','FREQUENCY','Pattern','V_WIDTH','H_WIDTH','DIMENSIONS HxWxD (INCHES)','WEIGHT (LBS)']
dff.columns = ['Name','Fmin','Fmax','F','Gmax','G','Hpbw','H_width','V_width','HxWxD','Weight','Tilt','Etilt','Ftob']
dff=dff.apply(lambda x :pd.to_numeric(x,errors='ignore'))
#
# Parse polarization in the field name
#
upolarp45 = ['(+45)' in x for x in dff['Name']]
upolarm45 = ['(-45)' in x for x in dff['Name']]
if (sum(upolarp45)>0):
dff.loc[upolarp45,'Polar']=45
if (sum(upolarm45)>0):
dff.loc[upolarm45,'Polar']=-45
atoll = {}
dfband = dff.groupby(['Fmin'])
for b in dfband:
keyband = str(b[0])+'-'+str(b[1]['Fmax'].values[0])
atoll[keyband]={} # band
dfpol = b[1].groupby(['Polar'])
for p in dfpol:
atoll[keyband][p[0]] = {} # polar
dftilt = p[1].groupby(['Tilt'])
Ghor = np.empty((360,1)) # angle , tilt , frequency
Gver = np.empty((360,1)) # angle ,
ct = 0
tilt = []
for t in dftilt:
dffreq = t[1].groupby(['F'])
ct+=1
cf=0
tilt.append(t[0])
freq = []
for f in dffreq:
freq.append(f[0])
cf+=1
if len(f[1])==1:
df = f[1]
else:
df = f[1].iloc[0:1]
Gmax = df['Gmax'].values
str1 = df.loc[:,'G'].values[0].replace(' ',' ')
lstr = str1.split(' ')
Pattern = [ eval(x) for x in lstr[0:-1]]
# 4 first fields / # of points
Nd,db,dc,Np = Pattern[0:4]
#print(Nd,b,c,Np)
tmp = np.array(Pattern[4:4+2*Np]).reshape(Np,2)
ah = tmp[:,0]
ghor = Gmax-tmp[:,1]
# 4 first fields / # of points
da,db,dc,dd = Pattern[4+2*Np:4+2*Np+4]
#pdb.set_trace()
#print a,b,c,d
tmp = np.array(Pattern[4+2*Np+4:]).reshape(dc,2)
gver = Gmax-tmp[:,0]
av = tmp[:,1]
try:
Ghor = np.hstack((Ghor,ghor[:,None]))
Gver = np.hstack((Gver,gver[:,None]))
except:
pdb.set_trace()
Ghor = np.delete(Ghor,0,1)
Gver = np.delete(Gver,0,1)
atoll[keyband][p[0]]['hor'] = Ghor.reshape(360,ct,cf)
atoll[keyband][p[0]]['ver'] = Gver.reshape(360,ct,cf)
atoll[keyband][p[0]]['tilt'] = np.array(tilt)
atoll[keyband][p[0]]['freq'] = np.array(freq)
self.atoll = atoll
# lbands : list of antenna bands
# Gmax = eval(self.df['Gain (dBi)'].values[0])
#fig = plt.figure()
#ax =plt.gca(projection='polar')
#ax =plt.gca()
#ax.plot(H2[:,1]*deg_to_rad,Gain-H2[:,0],'r',label='vertical',linewidth=2)
#ax.plot(H1[:,0]*deg_to_rad,Gain-H1[:,1],'b',label='horizontal',linewidth=2)
#ax.set_rmin(-30)
#plt.title(dir1+'/'+filename+' Gain : '+df['Gain (dBi)'].values[0])
#BXD-634X638XCF-EDIN.txt
#BXD-636X638XCF-EDIN.txt
def loadmat(self, directory="ant"):
""" load an antenna stored in a mat file
Parameters
----------
directory : str , optional
default 'ant'
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> from pylayers.antprop.antenna import *
>>> A = Antenna('S1R1.mat',directory='ant/UWBAN/Matfile')
>>> f,a = A.plotG(plan='theta',angdeg=0)
>>> f,a = A.plotG(plan='phi',angdeg=90,fig=f,ax=a)
>>> txt = plt.title('S1R1 antenna : st loadmat')
>>> plt.show()
"""
_filemat = self._filename
filemat = pyu.getlong(_filemat, directory)
d = io.loadmat(filemat, squeeze_me=True, struct_as_record=False)
ext = _filemat.replace('.mat', '')
d = d[ext]
#
#
#
self.typ = 'mat'
self.Date = str(d.Date)
self.Notes = str(d.Notes)
self.PhotoFile = str(d.PhotoFile)
self.Serie = eval(str(d.Serie))
self.Run = eval(str(d.Run))
self.DataFile = str(d.DataFile)
self.StartTime = str(d.StartTime)
self.AntennaName = str(d.AntennaName)
self.fGHz = d.freq/1.e9
self.theta = d.theta
self.phi = d.phi
self.Ft = d.Ftheta
self.Fp = d.Fphi
self.Fp = self.Fp.swapaxes(0, 2)
self.Fp = self.Fp.swapaxes(0, 1)
self.Ft = self.Ft.swapaxes(0, 2)
self.Ft = self.Ft.swapaxes(0, 1)
Gr = np.real(self.Fp * np.conj(self.Fp) + \
self.Ft * np.conj(self.Ft))
self.sqG = np.sqrt(Gr)
self.nth = len(self.theta)
self.nph = len(self.phi)
if type(self.fGHz) == float:
self.nf = 1
else:
self.nf = len(self.fGHz)
self.evaluated = True
self.grid = True
def load_trx(self, directory="ant", nf=104, ntheta=181, nphi=90, ncol=6):
""" load a trx file (deprecated)
Parameters
----------
directory : str
directory where is located the trx file (default : ant)
nf : float
number of frequency points
ntheta : float
number of theta
nphi : float
number of phi
TODO : DEPRECATED (Fix the Ft and Fp format with Nf as last axis)
"""
_filetrx = self._filename
filename = pyu.getlong(_filetrx, directory)
if ncol == 6:
pattern = """^.*\t.*\t.*\t.*\t.*\t.*\t.*$"""
else:
pattern = """^.*\t.*\t.*\t.*\t.*\t.*\t.*\t.*$"""
fd = open(filename, 'r')
d = fd.read().split('\r\n')
fd.close()
k = 0
#while ((re.search(pattern1,d[k]) is None ) & (re.search(pattern2,d[k]) is None )):
while re.search(pattern, d[k]) is None:
k = k + 1
d = d[k:]
N = len(d)
del d[N - 1]
r = '\t'.join(d)
r.replace(' ', '')
d = np.array(r.split()).astype('float')
#
# TODO Parsing the header
#
#nf = 104
#nphi = 90
#ntheta = 181
N = nf * nphi * ntheta
d = d.reshape(N, 7)
F = d[:, 0]
PHI = d[:, 1]
THETA = d[:, 2]
Fphi = d[:, 3] + d[:, 4] * 1j
Ftheta = d[:, 5] + d[:, 6] * 1j
self.Fp = Fphi.reshape((nf, nphi, ntheta))
self.Ft = Ftheta.reshape((nf, nphi, ntheta))
Ttheta = THETA.reshape((nf, nphi, ntheta))
Tphi = PHI.reshape((nf, nphi, ntheta))
Tf = F.reshape((nf, nphi, ntheta))
self.Fp = self.Fp.swapaxes(1, 2)
self.Ft = self.Ft.swapaxes(1, 2)
Ttheta = Ttheta.swapaxes(1, 2)
Tphi = Tphi.swapaxes(1, 2)
Tf = Tf.swapaxes(1, 2)
self.fGHz = Tf[:, 0, 0]
self.theta = Ttheta[0, :, 0]
#self.phi = Tphi[0,0,:]
#
# Temporaire
#
A1 = self.Fp[:, 90:181, :]
A2 = self.Fp[:, 0:91, :]
self.Fp = np.concatenate((A1, A2[:, ::-1, :]), axis=2)
A1 = self.Ft[:, 90:181, :]
A2 = self.Ft[:, 0:91, :]
self.Ft = np.concatenate((A1, A2[:, ::-1, :]), axis=2)
self.theta = np.linspace(0, np.pi, 91)
self.phi = np.linspace(0, 2 * np.pi, 180, endpoint=False)
self.nth = 91
self.nph = 180
self.nf = 104
self.evaluated = True
def pattern(self,theta=[],phi=[],typ='s3'):
""" return multidimensionnal radiation patterns
Parameters
----------
theta : array
1xNt
phi : array
1xNp
typ : string
{s1|s2|s3}
"""
if theta == []:
theta = np.linspace(0,np.pi,30)
if phi == []:
phi = np.linspace(0,2*np.pi,60)
self.grid = True
Nt = len(theta)
Np = len(phi)
Nf = len(self.fGHz)
#Th = np.kron(theta, np.ones(Np))
#Ph = np.kron(np.ones(Nt), phi)
if typ =='s1':
FTh, FPh = self.Fsynth1(theta, phi)
if typ =='s2':
FTh, FPh = self.Fsynth2b(theta,phi)
if typ =='s3':
FTh, FPh = self.Fsynth3(theta, phi)
#FTh = Fth.reshape(Nf, Nt, Np)
#FPh = Fph.reshape(Nf, Nt, Np)
return(FTh,FPh)
def coeffshow(self,**kwargs):
""" display antenna coefficient
typ : string
'ssh' |'vsh'
L : maximum level
kf : frequency index
vmin : float
vmax : float
"""
defaults = {'typ':'vsh',
'L':20,
'kf':46,
'vmin':-40,
'vmax':0,
'cmap':cm.hot_r,
'dB':True
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
L = kwargs['L']
kf = kwargs['kf']
# calculates mode energy
# linear and log scale
# E : f , l , m
if kwargs['typ']=='vsh':
E = self.C.energy(typ='s1')
if kwargs['typ']=='ssh':
E = self.S.energy(typ='s1')
# Aem : f,l
# calculates energy integrated over m
Aem = np.sum(E,axis=2)
Aem_dB = 10*np.log10(Aem)
# Ael : f,m
# calculates energy integrated over l
Ael = np.sum(E,axis=1)
Ael_dB = 10*np.log10(Ael)
fig, ax = plt.subplots()
fig.set_figwidth(15)
fig.set_figheight(10)
if kwargs['dB']:
im = ax.imshow(10*np.log10(E[kf,:,:]),
vmin = kwargs['vmin'],
vmax = kwargs['vmax'],
extent =[-L,L,L,0],
interpolation = 'nearest',
cmap = kwargs['cmap'])
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
axHistx = divider.append_axes("top", 1., pad=0.5, sharex=ax)
axHisty = divider.append_axes("left", 1., pad=0.5, sharey=ax)
#axHistx.bar(range(-L,L),Aem)
#axHisty.barh(range(0,L),Ael )
axHistx.yaxis.set_ticks(np.array([0,0.2,0.4,0.6,0.8]))
axHisty.xaxis.set_ticks(np.array([0,0.1,0.2,0.3]))
cbar = plt.colorbar(im, cax=cax)
fig.tight_layout()
plt.text(-0.02,0.6 ,'levels',
horizontalalignment='right',
verticalalignment='top',
transform=ax.transAxes,
rotation =90, fontsize= 15)
plt.text(0.6,1.1 ,'free space',
horizontalalignment='right',
verticalalignment='top',
transform=ax.transAxes,
fontsize= 15)
plt.text(0.55,-0.1 ,'modes',
horizontalalignment='right'
,verticalalignment='top', transform=ax.transAxes, fontsize= 15)
return fig,ax
def errel(self,kf=-1, dsf=1, typ='s3'):
""" calculates error between antenna pattern and reference pattern
Parameters
----------
kf : integer
frequency index. If k=-1 integration over all frequency
dsf : down sampling factor
typ :
Returns
-------
errelTh : float
relative error on :math:`F_{\\theta}`
errelPh : float
relative error on :math:`F_{\phi}`
errel : float
Notes
-----
.. math::
\epsilon_r^{\\theta} =
\\frac{|F_{\\theta}(\\theta,\phi)-\hat{F}_{\\theta}(\\theta,\phi)|^2}
{|F_{\\theta}(\\theta,\phi)|^2}
\epsilon_r^{\phi} =
\\frac{|F_{\phi}(\\theta,\phi)-\hat{F}_{\phi}(\\theta,\phi)|^2}
{|F_{\phi}(\\theta,\phi)|^2}
"""
#
# Retrieve angular bases from the down sampling factor dsf
#
theta = self.theta[::dsf]
phi = self.phi[::dsf]
Nt = len(theta)
Np = len(phi)
#Th = np.kron(theta, np.ones(Np))
#Ph = np.kron(np.ones(Nt), phi)
if typ =='s1':
FTh, FPh = self.Fsynth1(theta, phi)
if typ =='s2':
FTh, FPh = self.Fsynth2b(theta, phi)
if typ =='s3':
FTh, FPh = self.Fsynth3(theta, phi)
#FTh = Fth.reshape(self.nf, Nt, Np)
#FPh = Fph.reshape(self.nf, Nt, Np)
#
# Jacobian
#
#st = outer(sin(theta),ones(len(phi)))
st = np.sin(theta).reshape((len(theta), 1))
#
# Construct difference between reference and reconstructed
#
if kf!=-1:
dTh = (FTh[kf, :, :] - self.Ft[kf, ::dsf, ::dsf])
dPh = (FPh[kf, :, :] - self.Fp[kf, ::dsf, ::dsf])
#
# squaring + Jacobian
#
dTh2 = np.real(dTh * np.conj(dTh)) * st
dPh2 = np.real(dPh * np.conj(dPh)) * st
vTh2 = np.real(self.Ft[kf, ::dsf, ::dsf] \
* np.conj(self.Ft[kf, ::dsf, ::dsf])) * st
vPh2 = np.real(self.Fp[kf, ::dsf, ::dsf] \
* np.conj(self.Fp[kf, ::dsf, ::dsf])) * st
mvTh2 = np.sum(vTh2)
mvPh2 = np.sum(vPh2)
errTh = np.sum(dTh2)
errPh = np.sum(dPh2)
else:
dTh = (FTh[:, :, :] - self.Ft[:, ::dsf, ::dsf])
dPh = (FPh[:, :, :] - self.Fp[:, ::dsf, ::dsf])
#
# squaring + Jacobian
#
dTh2 = np.real(dTh * np.conj(dTh)) * st
dPh2 = np.real(dPh * np.conj(dPh)) * st
vTh2 = np.real(self.Ft[:, ::dsf, ::dsf] \
* np.conj(self.Ft[:, ::dsf, ::dsf])) * st
vPh2 = np.real(self.Fp[:, ::dsf, ::dsf] \
* np.conj(self.Fp[:, ::dsf, ::dsf])) * st
mvTh2 = np.sum(vTh2)
mvPh2 = np.sum(vPh2)
errTh = np.sum(dTh2)
errPh = np.sum(dPh2)
errelTh = (errTh / mvTh2)
errelPh = (errPh / mvPh2)
errel =( (errTh + errPh) / (mvTh2 + mvPh2))
return(errelTh, errelPh, errel)
def loadhfss(self,lfa = [], Nt=72,Np=37):
""" load antenna from HFSS file
Parameters
----------
lfa : list of antenna file
Nt : int
Number of angle theta
Np : int
Number of angle phi
Notes
-----
One file per frequency point
th , ph , abs_grlz,th_absdB,th_phase,ph_absdB,ph_phase_ax_ratio
"""
# lfa : list file antenna
self.nf = len(lfa)
fGHz = []
lacsv = []
Fphi = np.empty((self.nf,self.nth,self.nph))
Ftheta = np.empty((self.nf,self.nth,self.nph))
SqG = np.empty((self.nf,self.nth,self.nph))
for i in range (len(lfa)):
fGHz.append(eval(lfa[i].split('.csv')[0][-4]))
lacsv.append(pd.read_csv(lfa[i],
header=False,
sep=',',
names=['th','ph','abs_grlz','th_absdB','th_phase','ph_absdB','ph_phase','ax_ratio'],
index_col=False))
th=lacsv[i].th.reshape(Np,Nt)*np.pi/180.
ph=lacsv[i].ph.reshape(Np,Nt)*np.pi/180.
Greal = lacsv[i].abs_grlz.reshape(Np,Nt)
th_dB = lacsv[i].th_absdB.reshape(Np,Nt)
ph_dB = lacsv[i].ph_absdB.reshape(Np,Nt)
th_lin = pow(10,th_dB/20.)
ph_lin = pow(10,ph_dB/20.)
#th_phase = lacsv[i].th_phase.reshape(72,37)*np.pi/180.
#ph_phase = lacsv[i].ph_phase.reshape(72,37)*np.pi/180.
#axratio=lacsv[i].ax_ratio.reshape(72,37)
Fphi[i,:,:] = ph_lin.swapaxes(1,0)
Ftheta[i,:,:] = th_lin.swapaxes(1,0)
SqG[i,:,:] = Greal.swapaxes(1,0)
self.fGHz = np.array(fGHz)
#self.theta = th[0,:].reshape(Nt,1)
#self.phi = ph[:,0].reshape(1,Np)
self.theta = th[0,:]
self.phi = ph[:,0]
self.Fp=Fphi
self.Ft=Ftheta
self.sqG=SqG
def loadtrx(self,directory,param={}):
""" load trx file (SATIMO Near Field Chamber raw data)
Parameters
----------
directory
self._filename: short name of the antenna file
the file is sought in the $BASENAME/ant directory
fmin fmax Nf phmin phmax Nphi thmin thmax Ntheta #EDelay
0 1 2 3 4 5 6 7 8 9
1 10 121 0 6.19 72 0 3.14 37 0
param : dict
mode : string
mode 1 : columns are organized ['f','phi','th','ReFph','ImFphi','ReFth','ImFth']
mode 2 : columns are organized ['f','phi','th','GdB','GdB_ph','GdB_th']
mode2 corresponds to TRXV2
The measured values of Fp Ft and sqG and the associated theta and phi range
are stored using the underscore prefix.
e.g. self._Ft; self._Fp; self._sqG
Notes
------
for mode 2 :
it is required to create a header file "header_<_filename>.txt" with the structure
# fmin fmax Nf phmin phmax Nphi thmin thmax Ntheta #EDelay
and to remove the header from the trx file.
Warning : mode 2 automatically applies _swap_theta_phi !
"""
if param== {}:
param = {'mode' : 1}
_filetrx = self._filename
_headtrx = 'header_' + _filetrx
_headtrx = _headtrx.replace('trx', 'txt')
headtrx = pyu.getlong(_headtrx, directory)
filename = pyu.getlong(_filetrx, directory)
#
# Trx header structure
#
# fmin fmax Nf phmin phmax Nphi thmin thmax Ntheta #EDelay
# 0 1 2 3 4 5 6 7 8 9
# 1 10 121 0 6.19 72 0 3.14 37 0
#
#
foh = open(headtrx)
ligh = foh.read()
foh.close()
fmin = eval(ligh.split()[0])
fmax = eval(ligh.split()[1])
nf = eval(ligh.split()[2])
phmin = eval(ligh.split()[3])
phmax = eval(ligh.split()[4])
nphi = eval(ligh.split()[5])
thmin = eval(ligh.split()[6])
thmax = eval(ligh.split()[7])
ntheta = eval(ligh.split()[8])
#
# The electrical delay in column 9 is optional
#
try:
tau = eval(ligh.split()[9]) # tau : delay (ns)
except:
tau = 0
#
# Data are stored in 7 columns in mode 1
#
# 0 1 2 3 4 5 6
# f phi th ReFph ImFphi ReFth ImFth
#
#
fi = open(filename)
d = np.array(fi.read().split())
N = len(d)
if param['mode'] == 1:
M = N // 7
d = d.reshape(M, 7)
elif param['mode'] == 2:
M = N // 6
d = d.reshape(M, 6)
d = d.astype('float')
f = d[:, 0]
if f[0] == 0:
print("error : frequency cannot be zero")
# detect frequency unit
# if values are above 2000 it means the frequency is not expressed
# in GHz
#
if (f[0] > 2000):
f = f / 1.0e9
phi = d[:, 1]
theta = d[:, 2]
#
# type : refers to the way the angular values are stored in the file
# Detection of file type
#
# nfc
# f phi theta
# 2 1 0
# Natural
# f phi theta
# 2 0 1
#
# auto detect storage mode looping
#
dphi = abs(phi[0] - phi[1])
dtheta = abs(theta[0] - theta[1])
if (dphi == 0) & (dtheta != 0):
typ = 'nfc'
if (dtheta == 0) & (dphi != 0):
typ = 'natural'
self.typ = typ
if param['mode']==1:
Fphi = d[:, 3] + d[:, 4] * 1j
Ftheta = d[:, 5] + d[:, 6] * 1j
elif param['mode']==2:
Fphi = 10**(d[:, 4]/20)
Ftheta = 10**(d[:, 5]/20)
#
# Normalization
#
G = np.real(Fphi * np.conj(Fphi) + Ftheta * np.conj(Ftheta))
SqG = np.sqrt(G)
#Fphi = Fphi/SqG
#Ftheta = Ftheta/SqG
#Fphi = Fphi
#Ftheta = Ftheta
#
# Reshaping
#
if typ == 'natural':
self._Fp = Fphi.reshape((nf, ntheta, nphi))
self._Ft = Ftheta.reshape((nf, ntheta, nphi))
self._sqG = SqG.reshape((nf, ntheta, nphi))
Ttheta = theta.reshape((nf, ntheta, nphi))
Tphi = phi.reshape((nf, ntheta, nphi))
Tf = f.reshape((nf, ntheta, nphi))
self._Fp = self._Fp.swapaxes(0, 1).swapaxes(1,2)
self._Ft = self._Ft.swapaxes(0, 1).swapaxes(1,2)
self._sqG = self._sqG.swapaxes(0, 1).swapaxes(1,2)
Ttheta = Ttheta.swapaxes(0, 1).swapaxes(1,2)
Tphi = Tphi.swapaxes(0, 1).swapaxes(1,2)
Tf = Tf.swapaxes(0, 1).swapaxes(1,2)
if typ == 'nfc':
self._Fp = Fphi.reshape((nf, nphi, ntheta))
self._Ft = Ftheta.reshape((nf, nphi, ntheta))
self._sqG = SqG.reshape((nf, nphi, ntheta))
Ttheta = theta.reshape((nf, nphi, ntheta))
Tphi = phi.reshape((nf, nphi, ntheta))
Tf = f.reshape((nf, nphi, ntheta))
#
# Force natural order (f,theta,phi)
# This is not the order of the satimo nfc which is (f,phi,theta)
#
# self.Fp = self.Fp.swapaxes(1, 2)
# self.Ft = self.Ft.swapaxes(1, 2)
# self.sqG = self.sqG.swapaxes(1, 2)
self._Fp = self._Fp.swapaxes(0, 2)
self._Ft = self._Ft.swapaxes(0, 2)
self._sqG = self._sqG.swapaxes(0, 2)
Ttheta = Ttheta.swapaxes(0, 2)
Tphi = Tphi.swapaxes(0, 2)
Tf = Tf.swapaxes(0, 2)
# sqg=np.sqrt(10**(d[:,3]/10))
# self.sqG=sqg.reshape((nf, nphi, ntheta)).swapaxes(0, 2)
self._fGHz = Tf[0, 0, :]
self._theta = Ttheta[:, 0, 0]
self._phi = Tphi[0, :, 0]
#
# check header consistency
#
np.testing.assert_almost_equal(self._fGHz[0],fmin,6)
np.testing.assert_almost_equal(self._fGHz[-1],fmax,6)
np.testing.assert_almost_equal(self._theta[0],thmin,3)
np.testing.assert_almost_equal(self._theta[-1],thmax,3)
np.testing.assert_almost_equal(self._phi[0],phmin,3)
np.testing.assert_almost_equal(self._phi[-1],phmax,3)
self._nf = nf
self._nth = ntheta
self._nph = nphi
self._tau = tau
if param['mode']==2:
self._swap_theta_phi()
self.evaluated = False
def _swap_theta_phi(self):
""" swapping theta and phi in case where e.g.
theta in [0, 2*pi] and phi in [0,pi]
swapping allow to correctly return with the assumption
where
theta in [0,pi] and phi [0,2*pi] and allow e.g using vsh
methods.
"""
assert self._nth>self._nph,'nth < nph so swapping is not possible'
mid_nth = int(np.ceil(self._nth/2.))
new_nph = self._nph*2
# process for self.sqG
B1=self._sqG[:mid_nth,...]#self.sqG[:65,...]
B2=self._sqG[mid_nth:,...]#self.sqG[65:,...]
B2i= B2[::-1,...]
R=np.zeros((mid_nth,new_nph,self._nf))#R=np.zeros((65,128,31))
R[:,:mid_nth-1,:]=B1 #R[:,:64,:]=B1
R[:-1,mid_nth-1:,:]=B2i # R[:-1,64:,:]=B2i
R[-1,mid_nth-1:,:]=B1[-1,:,:]# R[-1,64:,:]=B1[-1,:,:]
self._sqG = R
# process for self.Ft
B1=self._Ft[:mid_nth,...]#self.Ft[:65,...]
B2=self._Ft[mid_nth:,...]#self.Ft[65:,...]
B2i= B2[::-1,...]
R=np.zeros((mid_nth,new_nph,self._nf))#R=np.zeros((65,128,31))
R[:,:mid_nth-1,:]=B1 #R[:,:64,:]=B1
R[:-1,mid_nth-1:,:]=B2i # R[:-1,64:,:]=B2i
R[-1,mid_nth-1:,:]=B1[-1,:,:]# R[-1,64:,:]=B1[-1,:,:]
self._Ft = R
# process for self.Fp
B1=self._Fp[:mid_nth,...]#self.Ft[:65,...]
B2=self._Fp[mid_nth:,...]#self.Ft[65:,...]
B2i= B2[::-1,...]
R=np.zeros((mid_nth,new_nph,self._nf))#R=np.zeros((65,128,31))
R[:,:mid_nth-1,:]=B1 #R[:,:64,:]=B1
R[:-1,mid_nth-1:,:]=B2i # R[:-1,64:,:]=B2i
R[-1,mid_nth-1:,:]=B1[-1,:,:]# R[-1,64:,:]=B1[-1,:,:]
self._Fp = R
# update theta,phi
self._theta = np.linspace(0,np.pi,mid_nth)
self._phi = np.linspace(0,2*np.pi,new_nph)
self._nth = mid_nth
self._nph = new_nph
def checkpole(self, kf=0):
""" display the reconstructed field on pole for integrity verification
Parameters
----------
kf : int
frequency index default 0
"""
Ft0 = self.Ft[kf, 0, :]
Fp0 = self.Fp[kf, 0, :]
Ftp = self.Ft[kf, -1, :]
Fpp = self.Fp[kf, -1, :]
phi = self.phi
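# Project the spherical components onto the x and y axes : at a pole the
# Cartesian components Ex, Ey of a well reconstructed field should be
# (nearly) independent of phi, which is what is checked visually below.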
Ex0 = Ft0 * np.cos(phi) - Fp0 * np.sin(phi)
Ey0 = Ft0 * np.sin(phi) + Fp0 * np.cos(phi)
Exp = Ftp * np.cos(phi) - Fpp * np.sin(phi)
Eyp = Ftp * np.sin(phi) + Fpp * np.cos(phi)
plt.subplot(4, 2, 1)
plt.plot(phi, np.real(Ex0))
plt.subplot(4, 2, 2)
plt.plot(phi, np.imag(Ex0))
plt.subplot(4, 2, 3)
plt.plot(phi, np.real(Ey0))
plt.subplot(4, 2, 4)
plt.plot(phi, np.imag(Ey0))
plt.subplot(4, 2, 5)
plt.plot(phi, np.real(Exp))
plt.subplot(4, 2, 6)
plt.plot(phi, np.imag(Exp))
plt.subplot(4, 2, 7)
plt.plot(phi, np.real(Eyp))
plt.subplot(4, 2, 8)
plt.plot(phi, np.imag(Eyp))
def info(self):
""" gives info about antenna object
"""
print(self._filename)
print("type : ", self.typ)
if self.typ == 'mat':
print(self.DataFile)
print(self.AntennaName)
print(self.Date)
print(self.StartTime)
print(self.Notes)
print(self.Serie)
print(self.Run)
print("Nb theta (lat) :", self.nth)
print("Nb phi (lon) :", self.nph)
if self.typ =='nfc':
print( "--------------------------")
print( "fmin (GHz) :", self.fGHz[0])
print( "fmax (GHz) :", self.fGHz[-1])
print( "Nf :", self.nf)
print( "thmin (rad) :", self.theta[0])
print( "thmax (rad) :", self.theta[-1])
print( "Nth :", self.nth)
print( "phmin (rad) :", self.phi[0])
print( "phmax (rad) :", self.phi[-1])
print( "Nph :", self.nph)
try:
self.C.info()
except:
print("No vsh coefficient calculated yet")
#@mlab.show
def _show3(self,bnewfig = True,
bcolorbar =True,
name=[],
binteract=False,
btitle=True,
bcircle=True,
**kwargs ):
""" show3 mayavi
Parameters
----------
btitle : boolean
display title
bcolorbar : boolean
display colorbar
binteract : boolean
enable interactive mode
bcircle : boolean
draw a circle
newfig: boolean
see also
--------
antprop.antenna._computemesh
"""
if not self.evaluated:
self.eval(pattern=True)
# k is the frequency index
if hasattr(self, 'p'):
lpshp = len(self.p.shape)
sum_index = tuple(np.arange(1, lpshp))
po = np.mean(self.p, axis=sum_index)
kwargs['po'] = po
x, y, z, k, scalar = self._computemesh(**kwargs)
if bnewfig:
mlab.clf()
f = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
else:
f = mlab.gcf()
if 'opacity' in kwargs:
opacity = kwargs['opacity']
else:
opacity = 1
self._mayamesh = mlab.mesh(x, y, z,
scalars= scalar,
resolution = 1,
opacity = opacity,reset_zoom=False)
if name == []:
f.children[-1].name = 'Antenna ' + self._filename
else :
f.children[-1].name = name + self._filename
if bcolorbar :
mlab.colorbar()
if btitle:
mlab.title(self._filename + ' @ ' + str(self.fGHz[k]) + ' GHz',height=1,size=0.5)
def circle(typ='xy',a=1.2):
phi = np.linspace(0, 2*np.pi, 2000)
if typ=='xy':
return [ a*np.cos(phi) ,
a*np.sin(phi) ,
np.zeros(len(phi))
]
if typ=='yz':
return [ np.zeros(len(phi)),
a*np.cos(phi) ,
a*np.sin(phi)
]
if typ=='xz':
return [ a*np.cos(phi),
np.zeros(len(phi)),
a*np.sin(phi)
]
# draw 3D circle around the pattern
if bcircle:
xc,yc,zc =circle('xy') # blue
mlab.plot3d(xc,yc,zc,color=(0,0,1))
xc,yc,zc =circle('yz') # red
mlab.plot3d(xc,yc,zc,color=(1,0,0))
xc,yc,zc =circle('xz') # green
mlab.plot3d(xc,yc,zc,color=(0,1,0))
if binteract:
self._outline = mlab.outline(self._mayamesh, color=(.7, .7, .7))
self._outline.visible=False
def picker_callback(picker):
""" Picker callback: this get called when on pick events.
"""
if picker.actor in self._mayamesh.actor.actors:
self._outline.visible = not self._outline.visible
self._is_selected=self._outline.visible
picker = f.on_mouse_pick(picker_callback)
return(f)
def _computemesh(self,**kwargs):
""" compute mesh from theta phi
Parameters
----------
fGHz : np.array()
default [] : takes center frequency fa[len(fa)/2]
po : np.array()
location point of the antenna
T : np.array
rotation matrix
minr : float
minimum radius in meter
maxr : float
maximum radius in meters
tag : string
ilog : boolean
title : boolean
Returns
-------
(x, y, z, k)
x , y , z values in cartesian axis
k frequency point evaluated
"""
defaults = {'fGHz': [],
'po': np.array([0, 0, 0]),
'T': np.eye(3),
'minr': 0.1,
'maxr': 1,
'scale': 1.,
'tag' : 'Pat',
'txru' : 0,
'ilog' : False,
'title': True,
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
fGHz = kwargs['fGHz']
minr = kwargs['minr']
maxr = kwargs['maxr']
tag = kwargs['tag']
ilog = kwargs['ilog']
txru = kwargs['txru']
scale = kwargs['scale']
po = kwargs['po']
# T is an unitary matrix
T = kwargs['T']
if fGHz == []:
# self.ext == '' <=> mathematically generated => nf = 1
if self.ext != '':
k = int(len(self.fGHz)/2)
else:
k = 0
else :
if self.ext != '':
k = np.where(self.fGHz>=fGHz)[0][0]
else:
k = 0
if len(self.Ft.shape)==2:
r = self.sqG[:,k]
elif len(self.Ft.shape)==3:
r = self.sqG[:,:,k]
else:
r = self.sqG[:,:,txru,k]
th = self.theta[:,None]
phi = self.phi[None,:]
if ilog :
r = 10*np.log10(abs(r))
else:
r = abs(r)
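# Normalise the radius into [minr, maxr] so that the displayed mesh always
# fits in a comparable bounding volume whatever the absolute gain values.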
if r.max() != r.min():
u = (r - r.min()) /(r.max() - r.min())
else : u = r
r = minr + (maxr-minr) * u
x = scale*r * np.sin(th) * np.cos(phi)
y = scale*r * np.sin(th) * np.sin(phi)
z = scale*r * np.cos(th)
if z.shape[1] != y.shape[1]:
z = z*np.ones(y.shape[1])
p = np.concatenate((x[...,None],
y[...,None],
z[...,None]),axis=2)
#
# antenna cs -> glogal cs
# q : Nt x Np x 3
q = np.einsum('ij,klj->kli',T,p)
#
# translation
#
scalar=(q[...,0]**2+q[...,1]**2+q[...,2]**2)
q[...,0]=q[...,0]+po[0]
q[...,1]=q[...,1]+po[1]
q[...,2]=q[...,2]+po[2]
x = q[...,0]
y = q[...,1]
z = q[...,2]
return x, y, z, k, scalar
def show3(self,k=0,po=[],T=[],txru=0,typ='G', mode='linear', silent=False):
""" show3 geomview
Parameters
----------
k : frequency index
po : position of the antenna
T : GCS of the antenna
typ : string
'G' | 'Ft' | 'Fp'
mode : string
'linear'| 'not implemented'
silent : boolean
True | False
Examples
--------
>>> from pylayers.antprop.antenna import *
>>> import numpy as np
>>> import matplotlib.pylab as plt
>>> A = Antenna('defant.sh3')
>>> #A.show3()
"""
if not self.evaluated:
self.eval(pattern=True)
f = self.fGHz[k]
# 3 axis : nth x nph x nf
if len(self.Ft.shape)==3:
if typ == 'G':
V = self.sqG[:, :,k]
if typ == 'Ft':
V = self.Ft[:, :,k]
if typ == 'Fp':
V = self.Fp[:, :,k]
# 4 axis : nth x nph x ntxru x nf
if len(self.Ft.shape)==4:
if typ == 'G':
V = self.sqG[:, :, txru,k]
if typ == 'Ft':
V = self.Ft[:, : ,txru,k]
if typ == 'Fp':
V = self.Fp[:, :,txru,k]
if po ==[]:
po = np.array([0, 0, 0])
if T ==[]:
T = np.eye(3)
_filename = 'antbody'
geo = geu.Geomoff(_filename)
# geo.pattern requires the following shapes
# theta (Ntx1)
# phi (1xNp)
#if len(np.shape(self.theta))==1:
# theta = self.theta[:,None]
#else:
# theta=self.theta
theta = self.theta
#if len(np.shape(self.phi))==1:
# phi = self.phi[None,:]
#else:
# phi=self.phi
phi = self.phi
geo.pattern(theta,phi,V,po=po,T=T,ilog=False,minr=0.01,maxr=0.2)
#filename = geom_pattern(self.theta, self.phi, V, k, po, minr, maxr, typ)
#filename = geom_pattern(self.theta, self.phi, V, k, po, minr, maxr, typ)
if not silent:
geo.show3()
def plot3d(self, k=0, typ='Gain', col=True):
""" show 3D pattern in matplotlib
Parameters
----------
k : frequency index
typ = 'Gain'
= 'Ftheta'
= 'Fphi'
if col -> color coded plot3D
else -> simple plot3D
"""
fig = plt.figure()
ax = axes3d.Axes3D(fig)
if typ == 'Gain':
V = self.sqG[:, :,k]
if typ == 'Ftheta':
V = self.Ft[ :, :,k]
if typ == 'Fphi':
V = self.Fp[ :, :,k]
vt = np.ones(self.nth)
vp = np.ones(self.nph)
Th = np.outer(self.theta, vp)
Ph = np.outer(vt, self.phi)
X = abs(V) * np.cos(Ph) * np.sin(Th)
Y = abs(V) * np.sin(Ph) * np.sin(Th)
Z = abs(V) * np.cos(Th)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if col:
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap=cm.hot_r,shade=True)
else:
ax.plot3D(np.ravel(X), np.ravel(Y), np.ravel(Z))
plt.show()
def pol3d(self, k=0, R=50, St=4, Sp=4, silent=False):
""" Display polarisation diagram in 3D
Parameters
----------
k : int
frequency index
R : float
radius of the sphere
St : int
downsampling factor along theta
Sp : int
downsampling factor along phi
silent : Boolean
(if True the file is created but not displayed)
The file created is named : Polar{ifreq}.list
it is placed in the /geom directory of the project
"""
_filename = 'Polar' + str(10000 + k)[1:] + '.list'
filename = pyu.getlong(_filename, pstruc['DIRGEOM'])
fd = open(filename, "w")
fd.write("LIST\n")
Nt = self.nth
Np = self.nph
N = 10
plth = np.arange(0, Nt, St)
plph = np.arange(0, Np, Sp)
for m in plph:
for n in plth:
#theta = self.theta[n,0]
theta = self.theta[n]
#print "m,theta= :",m,theta*180/np.pi
#phi = self.phi[0,m]
phi = self.phi[m]
#print "n,phi=:",n,phi*180/np.pi
B = geu.vec_sph(theta, phi)
p = R * np.array((np.cos(phi) * np.sin(theta),
np.sin(phi) * np.sin(theta),
np.cos(theta)))
fd.write('{\n')
geu.ellipse(fd, p, B[0, :], B[1, :], self.Ft[n, m , k], self.Fp[n, m , k], N)
fd.write('}\n')
fd.close()
if not silent:
chaine = "geomview " + filename + " 2>/dev/null &"
os.system(chaine)
def mse(self, Fth, Fph, N=0):
""" mean square error between original and reconstructed
Parameters
----------
Fth : np.array
Fph : np.array
N : int
Notes
-----
Calculate the relative mean square error between original pattern A.Ftheta , A.Fphi and the
pattern given as argument of the function Fth , Fph
The mse is evaluated on both polarization and normalized over the energy of each
original pattern.
The function returns the maximum between those two errors
N is a parameter which allows suppressing values near the poles in the error calculation :
if N=0 all values are kept, otherwise only theta indices N <= n < Nt - N are used
"""
sh = np.shape(self.Ft)
Nf = sh[0]
Nt = sh[1]
Np = sh[2]
# plage de theta (exclusion du pole)
pt = np.arange(N, Nt - N, 1)
Fthr = Fth.reshape(sh)
Fphr = Fph.reshape(sh)
Gr = np.real(Fphr * np.conj(Fphr) + Fthr * np.conj(Fthr))
SqGr = np.sqrt(Gr)
Fthr = Fthr[:, pt, :].ravel()
Fphr = Fphr[:, pt, :].ravel()
SqGr = SqGr[:, pt, :].ravel()
Ftho = self.Ft[:, pt, :].ravel()
Fpho = self.Fp[:, pt, :].ravel()
SqGo = self.sqG[:, pt, :].ravel()
Etho = np.sqrt(np.dot(np.conj(Ftho), Ftho))
Epho = np.sqrt(np.dot(np.conj(Fpho), Fpho))
Eo = np.sqrt(np.dot(np.conj(Ftho), Ftho) + np.dot(np.conj(Fpho), Fpho))
errth = Ftho - Fthr
errph = Fpho - Fphr
Err = np.real(np.sqrt(np.dot(np.conj(errth), errth) + np.dot(np.conj(errph), errph)))
Errth = np.real(np.sqrt(np.dot(np.conj(errth), errth)))
Errph = np.real(np.sqrt(np.dot(np.conj(errph), errph)))
#Errth_rel = Errth/Etho
#Errph_rel = Errph/Epho
Errth_rel = Errth / Eo
Errph_rel = Errph / Eo
Err_rel = Err / Eo
return Err_rel, Errth_rel, Errph_rel
def getdelay(self,delayCandidates = np.arange(-10,10,0.001)):
""" get electrical delay
Parameters
----------
delayCandidates : ndarray delay in (ns)
default np.arange(-10,10,0.001)
Returns
-------
electricalDelay : float
Author : Troels Pedersen (Aalborg University)
B.Uguen
"""
if self.evaluated:
maxPowerInd = np.unravel_index(np.argmax(abs(self.Ft)),np.shape(self.Ft))
elD = delayCandidates[np.argmax(abs(
np.dot(self.Ft[maxPowerInd[0],maxPowerInd[1],:]
,np.exp(2j*np.pi*self.fGHz[:,None]
*delayCandidates[None,:]))))]
#electricalDelay = delayCandidates[np.argmax(abs(
# np.dot(self.Ft[:,maxPowerInd[1],maxPowerInd[2]]
# ,np.exp(2j*np.pi*freq.reshape(len(freq),1)
# *delayCandidates.reshape(1,len(delayCandidates))))
# ))]
return(elD)
else:
raise Warning('Antenna has not been evaluated')
def elec_delay(self,tau):
r""" apply an electrical delay
Parameters
----------
tau : float
electrical delay in nanoseconds
Notes
-----
This function applies an electrical delay :math:`\exp(+2j\pi f \tau)`
to the phase of the patterns :math:`F_{\theta}` and :math:`F_{\phi}`
Examples
--------
.. plot::
:include-source:
>>> from pylayers.antprop.antenna import *
>>> A = Antenna('S2R2.sh3')
>>> A.eval()
>>> tau = A.getdelay()
>>> A.elec_delay(tau)
"""
self.tau = self.tau+tau
if self.evaluated:
Ftheta = self.Ft
Fphi = self.Fp
sh = np.shape(Ftheta)
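# e has shape (1,1,Nf) and is broadcast over the (Nt,Np,Nf) patterns, so the
# same frequency-dependent phase ramp is applied in every direction.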
e = np.exp(2 * np.pi * 1j * self.fGHz[None,None,:]* tau)
#E = np.outer(e, ones(sh[1] * sh[2]))
#Fth = Ftheta.reshape(sh[0], sh[1] * sh[2])
#EFth = Fth * E
#self.Ft = EFth.reshape(sh[0], sh[1], sh[2])
self.Ft = self.Ft*e
self.Fp = self.Fp*e
#Fph = Fphi.reshape(sh[0], sh[1] * sh[2])
#EFph = Fph * E
#self.Fp = EFph.reshape(sh[0], sh[1], sh[2])
else:
raise Warning('antenna has not been evaluated')
def Fsynth(self,theta=[],phi=[],):
""" Perform Antenna synthesis
Parameters
----------
theta : np.array
phi : np.array
call Antenna.Fpatt or Antenna.Fsynth3
Notes
-----
The antenna pattern synthesis is done either from spherical
harmonics coefficients or from an analytical expression of the
radiation pattern.
"""
if ((self.fromfile) or (self.typ=='vsh') or (self.typ=='ssh')):
Ft,Fp = self.Fsynth3(theta,phi)
self.gain()
self.evaluated=True
else :
Ft = self.Ft
Fp = self.Fp
self.theta = theta
self.phi = phi
eval('self.p'+self.typ)()
#Ft,Fp = self.Fpatt(theta,phi,pattern)
return (Ft,Fp)
#def Fsynth1(self, theta, phi, k=0):
def Fsynth1(self, theta, phi):
""" calculate complex antenna pattern from VSH Coefficients (shape 1)
Parameters
----------
theta : ndarray (1xNdir)
phi : ndarray (1xNdir)
k : int
frequency index
Returns
-------
Ft , Fp
"""
Nt = len(theta)
Np = len(phi)
if self.grid:
theta = np.kron(theta, np.ones(Np))
phi = np.kron(np.ones(Nt),phi)
nray = len(theta)
#Br = self.C.Br.s1[k, :, :]
#Bi = self.C.Bi.s1[k, :, :]
#Cr = self.C.Cr.s1[k, :, :]
#Ci = self.C.Ci.s1[k, :, :]
Br = self.C.Br.s1[:, :, :]
Bi = self.C.Bi.s1[:, :, :]
Cr = self.C.Cr.s1[:, :, :]
Ci = self.C.Ci.s1[:, :, :]
N = self.C.Br.N1
M = self.C.Br.M1
#print "N,M",N,M
#
# The - sign is necessary to get the good reconstruction
# deduced from observation
# May be it comes from a different definition of theta in SPHEREPACK
x = -np.cos(theta)
Pmm1n, Pmp1n = AFLegendre3(N, M, x)
ind = index_vsh(N, M)
n = ind[:, 0]
m = ind[:, 1]
#~ V, W = VW(n, m, x, phi, Pmm1n, Pmp1n)
V, W = VW(n, m, x, phi)
#
# broadcasting along frequency axis
#
V = np.expand_dims(V,0)
W = np.expand_dims(W,0)
#
# k : frequency axis
# l : coeff l
# m
# Fth = np.eisum('klm,kilm->ki',Br,np.real(V.T)) - \
# np.eisum('klm,kilm->ki',Bi,np.imag(V.T)) + \
# np.eisum('klm,kilm->ki',Ci,np.real(W.T)) + \
# np.eisum('klm,kilm->ki',Cr,np.imag(W.T))
# Fph = -np.eisum('klm,kilm->ki',Cr,np.real(V.T)) + \
# np.eisum('klm,kilm->ki',Ci,np.imag(V.T)) + \
# np.eisum('klm,kilm->ki',Bi,np.real(W.T)) + \
# np.eisum('klm,kilm->ki',Br,np.imag(W.T))
Brr = Br[:,n,m]
Bir = Bi[:,n,m]
Crr = Cr[:,n,m]
Cir = Ci[:,n,m]
Fth = np.dot(Brr, np.real(V.T)) - \
np.dot(Bir, np.imag(V.T)) + \
np.dot(Cir, np.real(W.T)) + \
np.dot(Crr, np.imag(W.T))
Fph = -np.dot(Crr, np.real(V.T)) + \
np.dot(Cir, np.imag(V.T)) + \
np.dot(Bir, np.real(W.T)) + \
np.dot(Brr, np.imag(W.T))
#Fth = np.dot(Br, np.real(V.T)) - \
# np.dot(Bi, np.imag(V.T)) + \
# np.dot(Ci, np.real(W.T)) + \
# np.dot(Cr, np.imag(W.T))
#Fph = -np.dot(Cr, np.real(V.T)) + \
# np.dot(Ci, np.imag(V.T)) + \
# np.dot(Bi, np.real(W.T)) + \
# np.dot(Br, np.imag(W.T))
if self.grid:
Nf = len(self.fGHz)
Fth = Fth.reshape(Nf, Nt, Np)
Fph = Fph.reshape(Nf, Nt, Np)
return Fth, Fph
def Fsynth2s(self,dsf=1):
""" pattern synthesis from shape 2 vsh coefficients
Parameters
----------
phi
Notes
-----
Calculate complex antenna pattern from VSH Coefficients (shape 2)
for the specified directions (theta,phi)
theta and phi arrays needs to have the same size
"""
theta = self.theta[::dsf]
phi = self.phi[::dsf]
Nt = len(theta)
Np = len(phi)
theta = np.kron(theta, np.ones(Np))
phi = np.kron(np.ones(Nt), phi)
Ndir = len(theta)
Br = self.C.Br.s2 # Nf x K2
Bi = self.C.Bi.s2 # Nf x K2
Cr = self.C.Cr.s2 # Nf x K2
Ci = self.C.Ci.s2 # Nf x K2
Nf = np.shape(self.C.Br.s2)[0]
K2 = np.shape(self.C.Br.s2)[1]
L = self.C.Br.N2 # int
M = self.C.Br.M2 # int
#print "N,M",N,M
#
# The - sign is necessary to get the good reconstruction
# deduced from observation
# May be it comes from a different definition of theta in SPHEREPACK
x = -np.cos(theta)
Pmm1n, Pmp1n = AFLegendre3(L, M, x)
ind = index_vsh(L, M)
l = ind[:, 0]
m = ind[:, 1]
V, W = VW2(l, m, x, phi, Pmm1n, Pmp1n) # K2 x Ndir
# Fth , Fph are Nf x Ndir
tEBr = []
tEBi = []
tECr = []
tECi = []
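# For each shape-2 coefficient k, the energy of its contribution to the
# pattern is integrated over the sphere (sin(theta) Jacobian) ; these
# energies can then be used to rank or threshold the coefficients.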
for k in range(K2):
BrVr = np.dot(Br[:,k].reshape(Nf,1),
np.real(V.T)[k,:].reshape(1,Ndir))
BiVi = np.dot(Bi[:,k].reshape(Nf,1),
np.imag(V.T)[k,:].reshape(1,Ndir))
CiWr = np.dot(Ci[:,k].reshape(Nf,1),
np.real(W.T)[k,:].reshape(1,Ndir))
CrWi = np.dot(Cr[:,k].reshape(Nf,1),
np.imag(W.T)[k,:].reshape(1,Ndir))
CrVr = np.dot(Cr[:,k].reshape(Nf,1),
np.real(V.T)[k,:].reshape(1,Ndir))
CiVi = np.dot(Ci[:,k].reshape(Nf,1),
np.imag(V.T)[k,:].reshape(1,Ndir))
BiWr = np.dot(Bi[:,k].reshape(Nf,1),
np.real(W.T)[k,:].reshape(1,Ndir))
BrWi = np.dot(Br[:,k].reshape(Nf,1),
np.imag(W.T)[k,:].reshape(1,Ndir))
EBr = np.sum(BrVr*np.conj(BrVr)*np.sin(theta)) + \
np.sum(BrWi*np.conj(BrWi)*np.sin(theta))
EBi = np.sum(BiVi*np.conj(BiVi)*np.sin(theta)) + \
np.sum(BiWr*np.conj(BiWr)*np.sin(theta))
ECr = np.sum(CrWi*np.conj(CrWi)*np.sin(theta)) + \
+ np.sum(CrVr*np.conj(CrVr)*np.sin(theta))
ECi = np.sum(CiWr*np.conj(CiWr)*np.sin(theta)) + \
+ np.sum(CiVi*np.conj(CiVi)*np.sin(theta))
tEBr.append(EBr)
tEBi.append(EBi)
tECr.append(ECr)
tECi.append(ECi)
#Fth = np.dot(Br, np.real(V.T)) - np.dot(Bi, np.imag(V.T)) + \
# np.dot(Ci, np.real(W.T)) + np.dot(Cr, np.imag(W.T))
#Fph = -np.dot(Cr, np.real(V.T)) + np.dot(Ci, np.imag(V.T)) + \
# np.dot(Bi, np.real(W.T)) + np.dot(Br, np.imag(W.T))
return np.array(tEBr),np.array(tEBi),np.array(tECr),np.array(tECi)
def Fsynth2b(self, theta, phi):
""" pattern synthesis from shape 2 vsh coefficients
Parameters
----------
theta : 1 x Nt
phi : 1 x Np
Notes
-----
Calculate complex antenna pattern from VSH Coefficients (shape 2)
for the specified directions (theta,phi)
theta and phi arrays needs to have the same size
"""
Nt = len(theta)
Np = len(phi)
if self.grid:
theta = np.kron(theta, np.ones(Np))
phi = np.kron(np.ones(Nt),phi)
Br = self.C.Br.s2 # Nf x K2
Bi = self.C.Bi.s2 # Nf x K2
Cr = self.C.Cr.s2 # Nf x K2
Ci = self.C.Ci.s2 # Nf x K2
L = self.C.Br.N2 # int
M = self.C.Br.M2 # int
#print "N,M",N,M
#
# The - sign is necessary to get the good reconstruction
# deduced from observation
# May be it comes from a different definition of theta in SPHEREPACK
x = -np.cos(theta)
Pmm1n, Pmp1n = AFLegendre3(L, M, x)
ind = index_vsh(L, M)
l = ind[:, 0]
m = ind[:, 1]
V, W = VW2(l, m, x, phi, Pmm1n, Pmp1n) # K2 x Ndir
# Fth , Fph are Nf x Ndir
Fth = np.dot(Br, np.real(V.T)) - np.dot(Bi, np.imag(V.T)) + \
np.dot(Ci, np.real(W.T)) + np.dot(Cr, np.imag(W.T))
Fph = -np.dot(Cr, np.real(V.T)) + np.dot(Ci, np.imag(V.T)) + \
np.dot(Bi, np.real(W.T)) + np.dot(Br, np.imag(W.T))
if self.grid:
Nf = len(self.fGHz)
Fth = Fth.reshape(Nf, Nt, Np)
Fph = Fph.reshape(Nf, Nt, Np)
return Fth, Fph
def Fsynth2(self, theta, phi, typ = 'vsh'):
""" pattern synthesis from shape 2 vsh coeff
Parameters
----------
theta : array 1 x Nt
phi : array 1 x Np
pattern : boolean
default False
typ : string
{vsh | ssh}
Notes
-----
Calculate complex antenna pattern from VSH Coefficients (shape 2)
for the specified directions (theta,phi)
theta and phi arrays needs to have the same size
"""
self.nth = len(theta)
self.nph = len(phi)
self.nf = len(self.fGHz)
if typ =='vsh' :
if self.grid:
theta = np.kron(theta, np.ones(self.nph))
phi = np.kron(np.ones(self.nth),phi)
Br = self.C.Br.s2
Bi = self.C.Bi.s2
Cr = self.C.Cr.s2
Ci = self.C.Ci.s2
N = self.C.Br.N2
M = self.C.Br.M2
#print "N,M",N,M
#
# The - sign is necessary to get the good reconstruction
# deduced from observation
# May be it comes from a different definition of theta in SPHEREPACK
x = -np.cos(theta)
Pmm1n, Pmp1n = AFLegendre3(N, M, x)
ind = index_vsh(N, M)
n = ind[:, 0]
m = ind[:, 1]
#~ V, W = VW(n, m, x, phi, Pmm1n, Pmp1n)
V, W = VW(n, m, x, phi)
Fth = np.dot(Br, np.real(V.T)) - np.dot(Bi, np.imag(V.T)) + \
np.dot(Ci, np.real(W.T)) + np.dot(Cr, np.imag(W.T))
Fph = -np.dot(Cr, np.real(V.T)) + np.dot(Ci, np.imag(V.T)) + \
np.dot(Bi, np.real(W.T)) + np.dot(Br, np.imag(W.T))
if self.grid:
Fth = Fth.reshape(self.nf, self.nth, self.nph)
Fph = Fph.reshape(self.nf, self.nth, self.nph)
if typ=='ssh':
cx = self.S.Cx.s2
cy = self.S.Cy.s2
cz = self.S.Cz.s2
lmax = self.S.Cx.lmax
Y ,indx = SSHFunc(lmax, theta,phi)
Ex = np.dot(cx,Y).reshape(self.nf,self.nth,self.nph)
Ey = np.dot(cy,Y).reshape(self.nf,self.nth,self.nph)
Ez = np.dot(cz,Y).reshape(self.nf,self.nth,self.nph)
Fth,Fph = CartToSphere (theta, phi, Ex, Ey,Ez, bfreq = True )
self.evaluated = True
return Fth, Fph
def Fsynth3(self,theta=[],phi=[],typ='vsh'):
r""" synthesis of a complex antenna pattern from SH coefficients
(vsh or ssh in shape 3)
Ndir is the number of directions
Parameters
----------
theta : ndarray (1xNdir if not pattern) (1xNtheta if pattern)
phi : ndarray (1xNdir if not pattern) (1xNphi if pattern)
pattern : boolean
if True theta and phi are reorganized for building the pattern
typ : 'vsh' | 'ssh' | 'hfss'
Returns
-------
if self.grid:
Fth : ndarray (Ntheta x Nphi)
Fph : ndarray (Ntheta x Nphi)
else:
Fth : ndarray (1 x Ndir)
Fph : ndarray (1 x Ndir)
See Also
--------
pylayers.antprop.channel._vec2scalA
Examples
--------
.. plot::
:include-source:
>>> from pylayers.antprop.antenna import *
>>> import numpy as np
>>> import matplotlib.pylab as plt
>>> A = Antenna('defant.vsh3')
>>> F = A.eval(grid=True)
All Br,Cr,Bi,Ci have the same (l,m) index in order to evaluate only
once the V,W function
If the data comes from a cst file like the antenna used in WHERE1 D4.1
the pattern is multiplied by $\frac{1}{\sqrt{30}}$ (i.e. the gain by $\frac{4\pi}{120\pi}=\frac{1}{30}$)
"""
#typ = self.typ
#self._filename.split('.')[1]
#if typ=='satimo':
# coeff=1.
#if typ=='cst':
# coeff=1./sqrt(30)
#assert typ in ['ssh','vsh','hfss'],
assert (hasattr(self,'C') or hasattr(self,'S')),"No SH coeffs evaluated"
Nf = len(self.fGHz)
if theta==[]:
theta=np.linspace(0,np.pi,45)
if phi == []:
phi= np.linspace(0,2*np.pi,90)
Nt = len(theta)
Np = len(phi)
self.nth = len(theta)
self.nph = len(phi)
if self.grid:
#self.theta = theta[:,None]
#self.phi = phi[None,:]
self.theta = theta
self.phi = phi
theta = np.kron(theta, np.ones(Np))
phi = np.kron(np.ones(Nt),phi)
if typ =='vsh':
nray = len(theta)
Br = self.C.Br.s3
lBr = self.C.Br.ind3[:, 0]
mBr = self.C.Br.ind3[:, 1]
Bi = self.C.Bi.s3
Cr = self.C.Cr.s3
Ci = self.C.Ci.s3
L = lBr.max()
M = mBr.max()
# vector spherical harmonics basis functions
V, W = VW(lBr, mBr, theta, phi)
Fth = np.dot(Br, np.real(V.T)) - \
np.dot(Bi, np.imag(V.T)) + \
np.dot(Ci, np.real(W.T)) + \
np.dot(Cr, np.imag(W.T))
Fph = -np.dot(Cr, np.real(V.T)) + \
np.dot(Ci, np.imag(V.T)) + \
np.dot(Bi, np.real(W.T)) + \
np.dot(Br, np.imag(W.T))
if self.grid:
Fth = Fth.reshape(Nf, Nt, Np)
Fph = Fph.reshape(Nf, Nt, Np)
if typ == 'ssh':
cx = self.S.Cx.s3
cy = self.S.Cy.s3
cz = self.S.Cz.s3
lmax = self.S.Cx.lmax
Y ,indx = SSHFunc2(lmax, theta,phi)
#k = self.S.Cx.k2[:,0]
# same k for x y and z
k = self.S.Cx.k2
if self.grid :
Ex = np.dot(cx,Y[k])
Ey = np.dot(cy,Y[k])
Ez = np.dot(cz,Y[k])
Fth,Fph = CartToSphere(theta, phi, Ex, Ey,Ez, bfreq = True, pattern = True )
Fth = Fth.reshape(Nf,Nt,Np)
Fph = Fph.reshape(Nf,Nt,Np)
else:
Ex = np.dot(cx,Y[k])
Ey = np.dot(cy,Y[k])
Ez = np.dot(cz,Y[k])
Fth,Fph = CartToSphere (theta, phi, Ex, Ey,Ez, bfreq = True, pattern = False)
#self.Fp = Fph
#self.Ft = Fth
#G = np.real(Fph * np.conj(Fph) + Fth * np.conj(Fth))
#self.sqG = np.sqrt(G)
#if self.grid:
# self.Fp = Fph
# self.Ft = Fth
# G = np.real(Fph * np.conj(Fph) + Fth * np.conj(Fth))
# self.sqG = np.sqrt(G)
self.evaluated = True
#if typ == 'hfss':
# scipy.interpolate.griddata()
# Fth = self.Ft
# Fph = self.Fp
# TODO create 2 different functions for pattern and not pattern
#if not self.grid:
return Fth, Fph
#else:
# return None,None
def movie_vsh(self, mode='linear'):
""" animates vector spherical coeff w.r.t frequency
Parameters
----------
mode : string
'linear' |
"""
Brmin = abs(self.C.Br[:, 0:20, 0:20]).min()
Brmax = abs(self.C.Br[:, 0:20, 0:20]).max()
Bimin = abs(self.C.Bi[:, 0:20, 0:20]).min()
Bimax = abs(self.C.Bi[:, 0:20, 0:20]).max()
Crmin = abs(self.C.Cr[:, 0:20, 0:20]).min()
Crmax = abs(self.C.Cr[:, 0:20, 0:20]).max()
Cimin = abs(self.C.Ci[:, 0:20, 0:20]).min()
Cimax = abs(self.C.Ci[:, 0:20, 0:20]).max()
# print(Brmin, Brmax, Bimin, Bimax, Crmin, Crmax, Cimin, Cimax)
for k in range(self.nf):
plt.figure()
stf = ' f=' + str(self.fGHz[k]) + ' GHz'
subplot(221)
pcolor(abs(self.C.Br.s1[k, 0:20, 0:20]),
vmin=Brmin, vmax=Brmax, edgecolors='k')
#xlabel('m',fontsize=12)
ylabel('n', fontsize=12)
title('$|Br_{n}^{(m)}|$' + stf, fontsize=10)
colorbar()
subplot(222)
pcolor(abs(self.C.Bi.s1[k, 0:20, 0:20]),
vmin=Bimin, vmax=Bimax, edgecolors='k')
#xlabel('m',fontsize=12)
ylabel('n', fontsize=12)
title('$|Bi_{n}^{(m)}|$' + stf, fontsize=10)
colorbar()
subplot(223)
pcolor(abs(self.C.Cr.s1[k, 0:20, 0:20]),
vmin=Crmin, vmax=Crmax, edgecolors='k')
xlabel('m', fontsize=12)
#ylabel('n',fontsize=12)
title('$|Cr_{n}^{(m)}|$' + stf, fontsize=10)
colorbar()
subplot(224)
pcolor(abs(self.C.Ci.s1[k, 0:20, 0:20]),
vmin=Cimin, vmax=Cimax, edgecolors='k')
xlabel('m', fontsize=12)
#ylabel('n',fontsize=12)
title('$|Ci_{n}^{(m)}|$' + stf, fontsize=10)
colorbar()
filename = str('%03d' % k) + '.png'
savefig(filename, dpi=100)
clf()
command = ('mencoder',
'mf://*.png',
'-mf',
'type=png:w=800:h=600:fps=1',
'-ovc',
'lavc',
'-lavcopts',
'vcodec=mpeg4',
'-oac',
'copy',
'-o',
'vshcoeff.avi')
subprocess.check_call(command)
def minsh3(self, emax=0.05):
""" creates vsh3 with significant coeff until given relative reconstruction error
Parameters
----------
emax : float
error default 0.05
Notes
-----
Create antenna's vsh3 file which only contains
the significant vsh coefficients in shape 3,
in order to obtain a reconstruction maximal error = emax
This function requires a reading of .trx file before being executed
"""
#th = np.kron(self.theta, np.ones(self.nph))
#ph = np.kron(np.ones(self.nth), self.phi)
if not self.grid:
self.grid = True
Fth3, Fph3 = self.Fsynth3(self.theta, self.phi)
Err = self.mse(Fth3, Fph3, 0)
Enc = self.C.ens3()
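# Greedy thresholding : coefficients are dragged out one by one (drag3) in
# the order given by ens3 until the relative reconstruction error reaches
# emax ; the last removed coefficient is restored (put3) when the threshold
# is exceeded.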
n = len(Enc)
pos = 0
while (pos < n) & (Err[0] < emax):
Emin = Enc[pos]
d = self.C.drag3(Emin)
Fth3, Fph3 = self.Fsynth3(self.theta, self.phi)
Err = self.mse(Fth3, Fph3, 0)
if Err[0] >= emax:
i = d[0][0]
i3 = d[1][0]
self.C.put3(i, i3)
Fth3, Fph3 = self.Fsynth3(self.theta,self.phi)
Err = self.mse(Fth3, Fph3, 0)
pos = pos + 1
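    # Usage sketch for minsh3 (hedged): assuming the pattern has been evaluated on
    # its grid and vsh3 coefficients are available in self.C,
    #   A.minsh3(emax=0.05)
    # keeps only the coefficients needed for a <= 5% reconstruction error,
    # typically before calling A.savevsh3().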
def savevsh3(self,force=False):
""" save antenna in vsh3 format
Create a .vsh3 antenna file
"""
# create vsh3 file
_filevsh3 = os.path.splitext(self._filename)[0]+'.vsh3'
filevsh3 = pyu.getlong(_filevsh3, pstruc['DIRANT'])
#filevsh3 = pyu.getlong(self._filename,'ant')
if os.path.isfile(filevsh3) and not force:
print( filevsh3, ' already exist')
else:
print( 'create ', filevsh3, ' file')
coeff = {}
coeff['fmin'] = self.fGHz[0]
coeff['fmax'] = self.fGHz[-1]
coeff['Br.ind'] = self.C.Br.ind3
coeff['Bi.ind'] = self.C.Bi.ind3
coeff['Cr.ind'] = self.C.Cr.ind3
coeff['Ci.ind'] = self.C.Ci.ind3
coeff['Br.k'] = self.C.Br.k2
coeff['Bi.k'] = self.C.Bi.k2
coeff['Cr.k'] = self.C.Cr.k2
coeff['Ci.k'] = self.C.Ci.k2
coeff['Br.s3'] = self.C.Br.s3
coeff['Bi.s3'] = self.C.Bi.s3
coeff['Cr.s3'] = self.C.Cr.s3
coeff['Ci.s3'] = self.C.Ci.s3
if self.evaluated:
coeff['sl'] = self.sl
coeff['el'] = self.el
io.savemat(filevsh3, coeff, appendmat=False)
def savesh2(self):
""" save coeff in .sh2 antenna file
"""
# create sh2 file
#typ = self._filename.split('.')[1]
#self.typ = typ
_filesh2 = self._filename.replace('.'+ self.typ, '.sh2')
filesh2 = pyu.getlong(_filesh2, pstruc['DIRANT'])
if os.path.isfile(filesh2):
print(filesh2, ' already exist')
else:
print('create ', filesh2, ' file')
coeff = {}
coeff['fmin'] = self.fGHz[0]
coeff['fmax'] = self.fGHz[-1]
coeff['Cx.ind'] = self.S.Cx.ind2
coeff['Cy.ind'] = self.S.Cy.ind2
coeff['Cz.ind'] = self.S.Cz.ind2
coeff['Cx.lmax']= self.S.Cx.lmax
coeff['Cy.lmax']= self.S.Cy.lmax
coeff['Cz.lmax']= self.S.Cz.lmax
coeff['Cx.s2'] = self.S.Cx.s2
coeff['Cy.s2'] = self.S.Cy.s2
coeff['Cz.s2'] = self.S.Cz.s2
io.savemat(filesh2, coeff, appendmat=False)
def savesh3(self):
""" save antenna in sh3 format
create a .sh3 antenna file
"""
# create sh3 file
# if self._filename has an extension
# it is replace by .sh3
#typ = self._filename.split('.')[1]
#self.typ = typ
_filesh3 = self._filename.replace('.'+ self.typ, '.sh3')
filesh3 = pyu.getlong(_filesh3, pstruc['DIRANT'])
if os.path.isfile(filesh3):
print(filesh3, ' already exist')
else:
print('create ', filesh3, ' file')
coeff = {}
coeff['fmin'] = self.fGHz[0]
coeff['fmax'] = self.fGHz[-1]
coeff['Cx.ind'] = self.S.Cx.ind3
coeff['Cy.ind'] = self.S.Cy.ind3
coeff['Cz.ind'] = self.S.Cz.ind3
coeff['Cx.k'] = self.S.Cx.k2
coeff['Cy.k'] = self.S.Cy.k2
coeff['Cz.k'] = self.S.Cz.k2
coeff['Cx.lmax']= self.S.Cx.lmax
coeff['Cy.lmax']= self.S.Cy.lmax
coeff['Cz.lmax']= self.S.Cz.lmax
coeff['Cx.s3'] = self.S.Cx.s3
coeff['Cy.s3'] = self.S.Cy.s3
coeff['Cz.s3'] = self.S.Cz.s3
io.savemat(filesh3, coeff, appendmat=False)
def loadvsh3(self):
""" Load antenna's vsh3 file
vsh3 file contains a thresholded version of vsh coefficients in shape 3
"""
_filevsh3 = self._filename
filevsh3 = pyu.getlong(_filevsh3, pstruc['DIRANT'])
self.evaluated = False
if os.path.isfile(filevsh3):
coeff = io.loadmat(filevsh3, appendmat=False)
#
# This test is to fix a problem with 2 different
# behavior of io.loadmat
#
if type(coeff['fmin']) == float:
fmin = coeff['fmin']
fmax = coeff['fmax']
else:
fmin = coeff['fmin'][0][0]
fmax = coeff['fmax'][0][0]
            # Warning : this modification keeps only one dimension for k;
            # if the .vsh3 format evolves it may no longer work
#
Br = VCoeff('s3', fmin, fmax, coeff['Br.s3'],
coeff['Br.ind'], coeff['Br.k'][0])
Bi = VCoeff('s3', fmin, fmax, coeff['Bi.s3'],
coeff['Bi.ind'], coeff['Bi.k'][0])
Cr = VCoeff('s3', fmin, fmax, coeff['Cr.s3'],
coeff['Cr.ind'], coeff['Cr.k'][0])
Ci = VCoeff('s3', fmin, fmax, coeff['Ci.s3'],
coeff['Ci.ind'], coeff['Ci.k'][0])
self.C = VSHCoeff(Br, Bi, Cr, Ci)
self.nf = np.shape(Br.s3)[0]
self.fGHz = np.linspace(fmin, fmax, self.nf)
if 'sl' in coeff:
self.sl = coeff['sl'][0]
self.el = coeff['el'][0]
else:
print(_filevsh3, ' does not exist')
def loadsh3(self):
""" Load antenna's sh3 file
        sh3 file contains a thresholded version of ssh coefficients in shape 3
"""
_filesh3 = self._filename.split('.')[0]+'.sh3'
filesh3 = pyu.getlong(_filesh3, pstruc['DIRANT'])
self.evaluated = False
if os.path.isfile(filesh3):
coeff = io.loadmat(filesh3, appendmat=False)
#
# This test is to fix a problem with 2 different
# behavior of io.loadmat
#
if type(coeff['fmin']) == float:
fmin = coeff['fmin']
fmax = coeff['fmax']
else:
fmin = coeff['fmin'][0][0]
fmax = coeff['fmax'][0][0]
            # Warning : this modification keeps only one dimension for k;
            # if the .sh3 format evolves it may no longer work
#
if type(coeff['Cx.lmax']) == float:
lmax = coeff['Cx.lmax']
else:
lmax = coeff['Cx.lmax'][0][0]
Cx = SCoeff(typ = 's3',
fmin = fmin ,
fmax = fmax ,
lmax = lmax,
data = coeff['Cx.s3'],
ind = coeff['Cx.ind'],
k = np.squeeze(coeff['Cx.k']))
Cy = SCoeff(typ= 's3',
fmin = fmin ,
fmax = fmax ,
lmax = lmax,
data = coeff['Cy.s3'],
ind = coeff['Cy.ind'],
k = np.squeeze(coeff['Cy.k']))
Cz = SCoeff(typ = 's3',
fmin = fmin ,
fmax = fmax ,
data = coeff['Cz.s3'],
lmax = lmax,
ind = coeff['Cz.ind'],
k = np.squeeze(coeff['Cz.k']))
if not 'S' in self.__dict__.keys():
self.S = SSHCoeff(Cx, Cy,Cz)
else:
self.S.sets3(Cx,Cy,Cz)
self.nf = np.shape(Cx.s3)[0]
self.fGHz = np.linspace(fmin, fmax, self.nf)
else:
print(_filesh3, ' does not exist')
def savevsh2(self, filename = ''):
""" save coeff in a .vsh2 antenna file
Parameters
----------
filename : string
"""
# create vsh2 file
if filename == '':
_filevsh2 = self._filename.replace('.trx', '.vsh2')
        else:
            _filevsh2 = filename
filevsh2 = pyu.getlong(_filevsh2, pstruc['DIRANT'])
if os.path.isfile(filevsh2):
print(filevsh2, ' already exist')
else:
print('create ', filevsh2, ' file')
coeff = {}
coeff['fmin'] = self.fGHz[0]
coeff['fmax'] = self.fGHz[-1]
coeff['Br.ind'] = self.C.Br.ind2
coeff['Bi.ind'] = self.C.Bi.ind2
coeff['Cr.ind'] = self.C.Cr.ind2
coeff['Ci.ind'] = self.C.Ci.ind2
coeff['Br.s2'] = self.C.Br.s2
coeff['Bi.s2'] = self.C.Bi.s2
coeff['Cr.s2'] = self.C.Cr.s2
coeff['Ci.s2'] = self.C.Ci.s2
io.savemat(filevsh2, coeff, appendmat=False)
def loadsh2(self):
""" load spherical harmonics coefficient in shape 2
"""
_filesh2 = self._filename.split('.')[0]+'.sh2'
filesh2 = pyu.getlong(_filesh2, pstruc['DIRANT'])
if os.path.isfile(filesh2):
coeff = io.loadmat(filesh2, appendmat=False)
#
# This test is to fix a problem with 2 different
# behavior of io.loadmat
#
if type(coeff['fmin']) == float:
fmin = coeff['fmin']
fmax = coeff['fmax']
else:
fmin = coeff['fmin'][0][0]
fmax = coeff['fmax'][0][0]
if type(coeff['Cx.lmax']) == float:
lmax = coeff['Cx.lmax']
else:
lmax = coeff['Cx.lmax'][0][0]
Cx = SCoeff(typ='s2',
fmin=fmin,
fmax=fmax,
lmax = lmax,
data=coeff['Cx.s2'],
ind=coeff['Cx.ind'])
Cy = SCoeff(typ='s2',
fmin=fmin,
fmax=fmax,
lmax = lmax,
data=coeff['Cy.s2'],
ind=coeff['Cy.ind'])
Cz = SCoeff(typ='s2',
fmin=fmin,
fmax=fmax,
lmax = lmax,
data=coeff['Cz.s2'],
ind=coeff['Cz.ind'])
self.S = SSHCoeff(Cx, Cy,Cz)
Nf = np.shape(Cx.s2)[0]
self.fGHz = np.linspace(fmin, fmax, Nf)
else:
print( _filesh2, ' does not exist')
def loadvsh2(self):
""" load antenna from .vsh2 file format
Load antenna's vsh2 file which only contains
the vsh coefficients in shape 2
"""
_filevsh2 = self._filename
filevsh2 = pyu.getlong(_filevsh2, pstruc['DIRANT'])
if os.path.isfile(filevsh2):
coeff = io.loadmat(filevsh2, appendmat=False)
#
# This test is to fix a problem with 2 different
# behavior of io.loadmat
#
if type(coeff['fmin']) == float:
fmin = coeff['fmin']
fmax = coeff['fmax']
else:
fmin = coeff['fmin'][0][0]
fmax = coeff['fmax'][0][0]
Br = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
data=coeff['Br.s2'], ind=coeff['Br.ind'])
Bi = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
data=coeff['Bi.s2'], ind=coeff['Bi.ind'])
Cr = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
data=coeff['Cr.s2'], ind=coeff['Cr.ind'])
Ci = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
data=coeff['Ci.s2'], ind=coeff['Ci.ind'])
self.C = VSHCoeff(Br, Bi, Cr, Ci)
Nf = np.shape(Br.s2)[0]
self.fGHz = np.linspace(fmin, fmax, Nf)
else:
print( _filevsh2, ' does not exist')
def loadvsh3_old(self):
""" Load antenna vsh coefficients in shape 3
"""
_filevsh3 = self._filename
filevsh3 = getlong(_filevsh3, pstruc['DIRANT'])
fmin = 2.
fmax = 8.
if os.path.isfile(filevsh3):
coeff = io.loadmat(filevsh3, appendmat=False)
Br = VCoeff('s3', fmin, fmax, coeff['Br.s3'],
coeff['Br.ind'], coeff['Br.k'])
Bi = VCoeff('s3', fmin, fmax, coeff['Bi.s3'],
coeff['Bi.ind'], coeff['Bi.k'])
Cr = VCoeff('s3', fmin, fmax, coeff['Cr.s3'],
coeff['Cr.ind'], coeff['Cr.k'])
Ci = VCoeff('s3', fmin, fmax, coeff['Ci.s3'],
coeff['Ci.ind'], coeff['Ci.k'])
self.C = VSHCoeff(Br, Bi, Cr, Ci)
self.fGHz = np.linspace(fmin, fmax, 121)
else:
print(_filevsh3, ' does not exist')
def pol2cart(self, ith):
""" converts FTheta, FPhi to Fx,Fy,Fz for theta=ith
Parameters
----------
ith : theta index
Returns
-------
Fx
Fy
Fz
See Also
--------
cart2pol
"""
Fth = self.Ft[:, ith, :]
Fph = self.Fp[:, ith, :]
th = self.theta[ith]
ph = self.phi
Fx = Fth * np.cos(th) * np.cos(ph) - Fph * np.sin(ph)
Fy = Fth * np.cos(th) * np.sin(ph) + Fph * np.cos(ph)
Fz = (-1) * Fth * np.sin(th)
return(Fx, Fy, Fz)
def cart2pol(self, Fx, Fy, Fz, ith):
""" converts Fx,Fy,Fz to Ftheta, Fphi for theta=ith
Parameters
----------
Fx : np.array
Fy : np.array
Fz : np.array
ith : theta index
See Also
--------
pol2cart
"""
th = self.theta[ith]
ph = self.phi
Fth = Fx * np.cos(th) * np.cos(ph) + Fy * np.cos(th) * np.sin(ph) - Fz * np.sin(th)
        Fph = -Fx * np.sin(ph) + Fy * np.cos(ph)
SqG = np.sqrt(np.real(Fph * np.conj(Fph) + Fth * np.conj(Fth)))
self.sqG[:, ith, :] = SqG
self.Ft[:, ith, :] = Fth
self.Fp[:, ith, :] = Fph
def plotVW(n, m, theta, phi, sf=False):
    """ plot the vector spherical harmonics basis functions V and W in 3D
Parameters
----------
n,m : integer values (m<=n)
theta : ndarray
phi : ndarray
sf : boolean
if sf : plotted figures are saved in a *.png file
else : plotted figures aren't saved
Examples
--------
.. plot::
:include-source:
>>> from pylayers.antprop.antenna import *
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> n=5
>>> m=3
>>> theta = np.linspace(0,np.pi,30)
>>> phi = np.linspace(0,2*np.pi,60)
>>> plotVW(n,m,theta,phi)
"""
# calculate v and w
if m <= n:
theta[np.where(theta == np.pi / 2)[0]] = np.pi / 2 + \
1e-10 # .. todo :: not clean
x = -np.cos(theta)
Pmm1n, Pmp1n = AFLegendre(n, m, x)
t1 = np.sqrt((n + m) * (n - m + 1))
t2 = np.sqrt((n - m) * (n + m + 1))
y1 = t1 * Pmm1n[:, m, n] - t2 * Pmp1n[:, m, n]
y2 = t1 * Pmm1n[:, m, n] + t2 * Pmp1n[:, m, n]
Ephi = np.exp(1j * m * phi)
cphi = np.cos(m * phi)
if m == 0:
sphi = 1e-10
else:
sphi = np.sin(m * phi)
ny = len(y1)
ne = len(Ephi)
vy = np.ones(ny)
ve = np.ones(ne)
Y1 = np.outer(y1, ve)
Y2 = np.outer(y2, ve)
EPh = np.outer(vy, Ephi)
const = (-1.0) ** n / (2 * np.sqrt(n * (n + 1)))
V = const * Y1 * EPh
#V[np.isinf(V)|isnan(V)]=0
Vcos = cphi * V
Vsin = sphi * V
if m == 0:
#W=np.zeros((len(theta),len(phi)))
W = np.ones((len(theta), len(phi))) * 1e-10
else:
Waux = Y2 * EPh
x1 = 1.0 / x
W = np.outer(x1, const) * Waux
Wcos = cphi * W
Wsin = sphi * W
# plot V and W
Ntheta = np.size(theta)
vt = np.ones(Ntheta)
Nphi = np.size(phi)
vp = np.ones(Nphi)
Phi = np.outer(vt, phi)
Theta = np.outer(theta, vp)
#figdirV='/home/rburghel/Bureau/bases_decomposition_VW/base_V_Vsin_Vcos/'
figdirV = './'
ext1 = '.pdf'
ext2 = '.eps'
ext3 = '.png'
fig = plt.figure()
ax = axes3d.Axes3D(fig)
X = abs(V) * np.cos(Phi) * np.sin(Theta)
Y = abs(V) * np.sin(Phi) * np.sin(Theta)
Z = abs(V) * np.cos(Theta)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
ax.set_xlim3d([-1, 1])
ax.set_ylim3d([-1, 1])
ax.set_zlim3d([-1, 1])
if sf:
sz = fig.get_size_inches()
fig.set_size_inches(sz * 1.8)
figname = figdirV + 'V' + str(n) + str(m)
fig.savefig(figname + ext1, orientation='portrait')
fig.savefig(figname + ext2, orientation='portrait')
fig.savefig(figname + ext3, orientation='portrait')
fig = plt.figure()
ax = axes3d.Axes3D(fig)
X = abs(Vcos) * np.cos(Phi) * np.sin(Theta)
Y = abs(Vcos) * np.sin(Phi) * np.sin(Theta)
Z = abs(Vcos) * np.cos(Theta)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
ax.set_xlim3d([-1, 1])
ax.set_ylim3d([-1, 1])
ax.set_zlim3d([-1, 1])
if sf:
sz = fig.get_size_inches()
fig.set_size_inches(sz * 1.8)
figname = figdirV + 'Vcos' + str(n) + str(m) + '.jpg'
fig.savefig(figname + ext1, orientation='portrait')
fig.savefig(figname + ext2, orientation='portrait')
fig.savefig(figname + ext3, orientation='portrait')
fig = plt.figure()
ax = axes3d.Axes3D(fig)
X = abs(Vsin) * np.cos(Phi) * np.sin(Theta)
Y = abs(Vsin) * np.sin(Phi) * np.sin(Theta)
Z = abs(Vsin) * np.cos(Theta)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
ax.set_xlim3d([-1, 1])
ax.set_ylim3d([-1, 1])
ax.set_zlim3d([-1, 1])
if sf:
sz = fig.get_size_inches()
fig.set_size_inches(sz * 1.8)
figname = figdirV + 'Vsin' + str(n) + str(m) + '.jpg'
fig.savefig(figname + ext1, orientation='portrait')
fig.savefig(figname + ext2, orientation='portrait')
fig.savefig(figname + ext3, orientation='portrait')
#figdirW='/home/rburghel/Bureau/bases_decomposition_VW/base_W_Wsin_Wcos/'
figdirW = './'
fig = plt.figure()
ax = axes3d.Axes3D(fig)
X = abs(W) * np.cos(Phi) * np.sin(Theta)
Y = abs(W) * np.sin(Phi) * np.sin(Theta)
Z = abs(W) * np.cos(Theta)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
ax.set_xlim3d([-1, 1])
ax.set_ylim3d([-1, 1])
ax.set_zlim3d([-1, 1])
if sf:
sz = fig.get_size_inches()
fig.set_size_inches(sz * 1.8)
figname = figdirW + 'W' + str(n) + str(m)
fig.savefig(figname + ext1, orientation='portrait')
fig.savefig(figname + ext2, orientation='portrait')
fig.savefig(figname + ext3, orientation='portrait')
fig = plt.figure()
ax = axes3d.Axes3D(fig)
X = abs(Wcos) * np.cos(Phi) * np.sin(Theta)
Y = abs(Wcos) * np.sin(Phi) * np.sin(Theta)
Z = abs(Wcos) * np.cos(Theta)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
ax.set_xlim3d([-1, 1])
ax.set_ylim3d([-1, 1])
ax.set_zlim3d([-1, 1])
if sf:
sz = fig.get_size_inches()
fig.set_size_inches(sz * 1.8)
figname = figdirW + 'Wcos' + str(n) + str(m)
fig.savefig(figname + ext1, orientation='portrait')
fig.savefig(figname + ext2, orientation='portrait')
fig.savefig(figname + ext3, orientation='portrait')
fig = plt.figure()
ax = axes3d.Axes3D(fig)
X = abs(Wsin) * np.cos(Phi) * np.sin(Theta)
Y = abs(Wsin) * np.sin(Phi) * np.sin(Theta)
Z = abs(Wsin) * np.cos(Theta)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
ax.set_xlim3d([-1, 1])
ax.set_ylim3d([-1, 1])
ax.set_zlim3d([-1, 1])
if sf:
sz = fig.get_size_inches()
fig.set_size_inches(sz * 1.8)
figname = figdirW + 'Wsin' + str(n) + str(m)
fig.savefig(figname + ext1, orientation='portrait')
fig.savefig(figname + ext2, orientation='portrait')
fig.savefig(figname + ext3, orientation='portrait')
plt.show()
else:
print("Error: m>n!!!")
def compdiag(k, A, th, ph, Fthr, Fphr, typ='modulus', lang='english', fontsize=18):
""" makes comparison between original pattern and reconstructed pattern
Parameters
----------
k : frequency index
A : Antenna
ph : phi base (1 x Np)
th : theta base (1 x Nt)
    Fthr : Fth output of Fsynth Nf x (Ntheta*Nphi)
    Fphr : Fph output of Fsynth Nf x (Ntheta*Nphi)
lang = 'french'
= 'english'
"""
Nf = np.shape(Fthr)[0]
#Fthr = Fthr.reshape(Nf,len(th),len(ph))
#Fphr = Fphr.reshape(Nf,len(th),len(ph))
plt.figure()
rc('text', usetex=True)
Ftho = A.Ftheta
Fpho = A.Fphi
# limites module Fthr, Ftho, Fphr, Fpho
maxTr = abs(Fthr[:, :, k]).max()
maxTo = abs(Ftho[:, :, k ]).max()
MmT = max(maxTr, maxTo)
minTr = abs(Fthr[ :, :, k ]).min()
minTo = abs(Ftho[ :, :, k ]).min()
mmT = min(minTr, minTo)
maxPr = abs(Fphr[ :, :, k ]).max()
maxPo = abs(Fpho[ :, :, k ]).max()
MmP = max(maxPr, maxPo)
minPr = abs(Fphr[ :, :, k ]).min()
minPo = abs(Fpho[ :, :, k ]).min()
mmP = min(minPr, minPo)
# limites real Fthr, Ftho, Fphr, Fpho
maxTrr = np.real(Fthr[ :, :, k ]).max()
maxTor = np.real(Ftho[ :, :, k ]).max()
MrT = max(maxTrr, maxTor)
minTrr = np.real(Fthr[ :, :, k ]).min()
minTor = np.real(Ftho[ :, :, k ]).min()
mrT = min(minTrr, minTor)
maxPrr = np.real(Fphr[ :, :, k ]).max()
maxPor = np.real(Fpho[ :, :, k ]).max()
MrP = max(maxPrr, maxPor)
minPrr = np.real(Fphr[ :, :, k ]).min()
minPor = np.real(Fpho[ :, :, k ]).min()
mrP = min(minPrr, minPor)
# limites real Fthr, Ftho, Fphr, Fpho
maxTri = np.imag(Fthr[ :, :, k ]).max()
maxToi = np.imag(Ftho[ :, :, k ]).max()
MiT = max(maxTri, maxToi)
minTri = np.imag(Fthr[ :, :, k ]).min()
minToi = np.imag(Ftho[ :, :, k ]).min()
miT = min(minTri, minToi)
maxPri = np.imag(Fphr[ :, :, k ]).max()
maxPoi = np.imag(Fpho[ :, :, k ]).max()
MiP = max(maxPri, maxPoi)
minPri = np.imag(Fphr[ :, :, k ]).min()
minPoi = np.imag(Fpho[ :, :, k ]).min()
miP = min(minPri, minPoi)
# limithes arg Fth,Fph
maxATr = np.angle(Fthr[ :, :, k ]).max()
maxATo = np.angle(Ftho[ :, :, k ]).max()
maT = max(maxATr, maxATo)
minATr = np.angle(Fthr[ :, :, k ]).min()
minATo = np.angle(Ftho[ :, :, k ]).min()
maT0 = min(minATr, minATo)
maxAPr = np.angle(Fphr[ :, :, k ]).max()
maxAPo = np.angle(Fpho[ :, :, k ]).max()
maP = max(maxAPr, maxAPo)
minAPr = np.angle(Fphr[ :, :, k ]).min()
minAPo = np.angle(Fpho[ :, :, k ]).min()
maP0 = min(minAPr, minAPo)
ax = plt.axes([0, 0, 360, 180])
rtd = 180 / np.pi
plt.subplot(221)
if typ == 'modulus':
#
#cmap=cm.jet
#pcolor(A.phi*rtd,A.theta*rtd,abs(Ftho[k,:,:]),vmin=0,vmax=mmT)
#
#cmap= gray
#pcolor(A.phi*rtd,A.theta*rtd,abs(Ftho[k,:,:]),cmap=cm.gray_r,vmin=0,vmax=mmT)
#
#cmap=cm.hot
plt.pcolor(A.phi * rtd, A.theta * rtd, abs(Ftho[ :, :, k ]),
cmap=cm.hot_r, vmin=mmT, vmax=MmT)
plt.title(r'$|F_{\theta}|$ original', fontsize=fontsize)
if typ == 'real':
#pcolor(A.phi*rtd,A.theta*rtd,real(Ftho[k,:,:]),cmap=cm.gray_r,vmin=0,vmax=mmT)
plt.pcolor(A.phi * rtd, A.theta * rtd, np.real(Ftho[ :, :, k ]),
cmap=cm.hot_r, vmin=mrT, vmax=MrT)
title(r'Re ($F_{\theta}$) original', fontsize=fontsize)
if typ == 'imag':
#pcolor(A.phi*rtd,A.theta*rtd,imag(Ftho[k,:,:]),cmap=cm.gray_r,vmin=0,vmax=mmT)
pcolor(A.phi * rtd, A.theta * rtd, np.imag(Ftho[ :, :, k ]),
cmap=cm.hot_r, vmin=miT, vmax=MiT)
title(r'Im ($F_{\theta}$) original', fontsize=fontsize)
if typ == 'phase':
#pcolor(A.phi*rtd,A.theta*rtd,angle(Ftho[k,:,:]),cmap=cm.gray_r,vmin=maT0,vmax=maT)
plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Ftho[ :, :, k ]),
cmap=cm.hot_r, vmin=maT0, vmax=maT)
if lang == 'french':
plt.title(r'Arg ($F_{\theta}$) original', fontsize=fontsize)
else:
plt.title(r'Ang ($F_{\theta}$) original', fontsize=fontsize)
plt.axis([0, 360, 0, 180])
plt.ylabel(r'$\theta$ (deg)', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
cbar = plt.colorbar()
for t in cbar.ax.get_yticklabels():
t.set_fontsize(fontsize)
plt.subplot(222)
if typ == 'modulus':
plt.pcolor(A.phi * rtd, A.theta * rtd, abs(Fpho[:, :, k ]),
cmap=cm.hot_r, vmin=mmP, vmax=MmP)
plt.title('$|F_{\phi}|$ original', fontsize=fontsize)
if typ == 'real':
plt.pcolor(A.phi * rtd, A.theta * rtd, np.real(Fpho[ :, :, k ]),
cmap=cm.hot_r, vmin=mrP, vmax=MrP)
plt.title('Re ($F_{\phi}$) original', fontsize=fontsize)
if typ == 'imag':
plt.pcolor(A.phi * rtd, A.theta * rtd, np.imag(Fpho[ :, :, k ]),
cmap=cm.hot_r, vmin=miP, vmax=MiP)
plt.title('Im ($F_{\phi}$) original', fontsize=fontsize)
if typ == 'phase':
plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Fpho[ :, :, k ]),
cmap=cm.hot_r, vmin=maP0, vmax=maP)
if lang == 'french':
plt.title('Arg ($F_{\phi}$) original', fontsize=fontsize)
else:
plt.title('Ang ($F_{\phi}$) original', fontsize=fontsize)
plt.axis([0, 360, 0, 180])
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
cbar = plt.colorbar()
for t in cbar.ax.get_yticklabels():
t.set_fontsize(fontsize)
plt.subplot(223)
if typ == 'modulus':
plt.pcolor(ph * rtd, th * rtd, abs(Fthr[:, :, k ]),
cmap=cm.hot_r, vmin=mmT, vmax=MmT)
if lang == 'french':
plt.title(r'$|F_{\theta}|$ reconstruit', fontsize=fontsize)
else:
plt.title(r'$|F_{\theta}|$ reconstructed', fontsize=fontsize)
if typ == 'real':
plt.pcolor(ph * rtd, th * rtd, np.real(Fthr[:,:,k ]),
cmap=cm.hot_r, vmin=mrT, vmax=MrT)
if lang == 'french':
title(r'Re ($F_{\theta}$) reconstruit', fontsize=fontsize)
else:
title(r'Re ($F_{\theta}$) reconstructed', fontsize=fontsize)
if typ == 'imag':
plt.pcolor(ph * rtd, th * rtd, np.imag(Fthr[ :, :, k ]),
cmap=cm.hot_r, vmin=miT, vmax=MiT)
if lang == 'french':
plt.title(r'Im ($F_{\theta}$) reconstruit', fontsize=fontsize)
else:
plt.title(r'Im ($F_{\theta}$) reconstructed', fontsize=fontsize)
if typ == 'phase':
plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Fthr[:,:,k]),
cmap=cm.hot_r, vmin=maT0, vmax=maT)
if lang == 'french':
plt.title(r'Arg ($F_{\theta}$) reconstruit', fontsize=fontsize)
else:
plt.title(r'Ang ($F_{\theta}$) reconstructed', fontsize=fontsize)
plt.axis([0, 360, 0, 180])
plt.xlabel(r'$\phi$ (deg)', fontsize=fontsize)
plt.ylabel(r'$\theta$ (deg)', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
cbar = plt.colorbar()
for t in cbar.ax.get_yticklabels():
t.set_fontsize(fontsize)
plt.subplot(224)
if typ == 'modulus':
plt.pcolor(ph * rtd, th * rtd, abs(Fphr[ :, :,k]),
cmap=cm.hot_r, vmin=mmP, vmax=MmP)
if lang == 'french':
plt.title('$|F_{\phi}|$ reconstruit', fontsize=fontsize)
else:
plt.title('$|F_{\phi}|$ reconstructed', fontsize=fontsize)
if typ == 'real':
plt.pcolor(ph * rtd, th * rtd, np.real(Fphr[ :, :,k]),
cmap=cm.hot_r, vmin=mrP, vmax=MrP)
if lang == 'french':
plt.title('Re ($F_{\phi}$) reconstruit', fontsize=fontsize)
else:
plt.title('Re ($F_{\phi}$) reconstructed', fontsize=fontsize)
if typ == 'imag':
plt.pcolor(ph * rtd, th * rtd, np.imag(Fphr[ :, :,k]),
cmap=cm.hot_r, vmin=miP, vmax=MiP)
if lang == 'french':
plt.title('Im ($F_{\phi}$) reconstruit', fontsize=fontsize)
else:
plt.title('Im ($F_{\phi}$) reconstructed', fontsize=fontsize)
if typ == 'phase':
plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Fphr[ :, :,k]),
cmap=cm.hot_r, vmin=maP0, vmax=maP)
if lang == 'french':
plt.title('Arg ($F_{\phi}$) reconstruit', fontsize=fontsize)
else:
plt.title('Ang ($F_{\phi}$) reconstructed', fontsize=fontsize)
plt.axis([0, 360, 0, 180])
plt.xlabel(r'$\phi$ (deg)', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
cbar = plt.colorbar()
for t in cbar.ax.get_yticklabels():
t.set_fontsize(fontsize)
def BeamGauss(theta,phi,Gmax=19.77,HPBW_az=10,HPBW_el=40,Tilt=10):
""" Beam with a Gaussian shape
Parameters
----------
theta : float
angle in degree
phi : float
angle in degree
Gmax : float
HPBW_az : float
Half Power Beamwidth azimuth degree
HPBW_el : float
Half Power Beamwidth elevation degree
Tilt : float
angle in degree
"""
c = np.pi/180.
az = c*(theta-(Tilt+90))*2*np.sqrt(np.log(2))
el = c*phi*2*np.sqrt(np.log(2))
taz = -(az/(HPBW_az*c))**2
tel = -(el/(HPBW_el*c))**2
gain = 10**(Gmax/10.)*np.exp(taz)*np.exp(tel)
return(gain)
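# Minimal check for BeamGauss (values are illustrative, not from this file): at
# boresight (theta = Tilt + 90 deg, phi = 0) both exponents vanish, so the
# returned linear gain equals 10**(Gmax/10):
#   g0 = BeamGauss(theta=100., phi=0., Gmax=19.77, HPBW_az=10, HPBW_el=40, Tilt=10)
#   10*np.log10(g0)   # ~= 19.77 dB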
def show3D(F, theta, phi, k, col=True):
""" show 3D matplotlib diagram
Parameters
----------
F : ndarray (Nf,Nt,Np)
theta : ndarray (1xNt)
angle
phi : ndarray (1xNp)
angle
k : int
frequency index
col : boolean
if col -> color coded plot3D
if col == False -> simple plot3D
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> from pylayers.antprop.antenna import *
>>> A = Antenna('defant.vsh3')
>>> A.eval(grid=True)
Warnings
--------
len(theta) must be equal with shape(F)[1]
len(phi) must be equal with shape(F)[2]
"""
nth = len(theta)
nph = len(phi)
if k >= np.shape(F)[0]:
        print('Error: frequency index k is out of range for F')
if nth != np.shape(F)[1]:
        print('Error: shape mismatch between theta and F')
if nph != np.shape(F)[2]:
        print('Error: shape mismatch between phi and F')
fig = plt.figure()
ax = axes3d.Axes3D(fig)
V = F[k, :, :]
vt = np.ones(nth)
vp = np.ones(nph)
Th = np.outer(theta, vp)
Ph = np.outer(vt, phi)
X = abs(V) * np.cos(Ph) * np.sin(Th)
Y = abs(V) * np.sin(Ph) * np.sin(Th)
Z = abs(V) * np.cos(Th)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if (col):
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
else:
ax.plot3D(np.ravel(X), np.ravel(Y), np.ravel(Z))
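# Usage sketch for show3D (hedged; attribute names are assumptions based on the
# methods above, where self.sqG is filled with shape (Nf, Nt, Np)):
#   A = Antenna('defant.vsh3')
#   A.eval(grid=True)
#   show3D(A.sqG, A.theta, A.phi, k=0)
#   plt.show()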
class AntPosRot(Antenna):
""" Antenna + position + Rotation
Notes
-----
This class implement an antenna at a position p
with an orientation T
"""
def __init__(self,name,p,T):
#super(AntPosRot,self).__init__(self,typ=name)
Antenna.__init__(self,typ=name)
self.p = p
self.T = T
def __repr__(self):
st = self._filename+'\n\n'
st = st +"p: "+ str(self.p)+'\n\n'
st = st +"T: "+ str(self.T)+'\n'
return(st)
def show3(self,**kwargs):
self._show3(newfig=False,
interact=False,
T=self.T,
po=self.p,
**kwargs)
def field(self,p):
""" calculate field at points p
Parameters
----------
p : np.array (N,3)
observation point
"""
rad_to_deg = 180/np.pi
assert p.shape[-1]==3
if len(p.shape)==1:
r = p[None,:]-self.p[None,:]
else:
r = p-self.p[None,:]
dist = np.sqrt(np.sum(r*r,axis=-1))[:,None]
u = r/dist
th = np.arccos(u[:,2])
ph = np.arctan2(u[:,1],u[:,0])
tang = np.vstack((th,ph)).T
#print("global",tang*rad_to_deg)
tangl,Rt = geu.BTB(tang, self.T)
#print("local",tangl*rad_to_deg)
self.eval(th=tangl[:,0],ph=tangl[:,1],grid=False)
E = (self.Ft[:,None,:]*self.T[:,2][None,:,None]
+self.Fp[:,None,:]*self.T[:,0][None,:,None])
P = np.exp(-1j*2*np.pi*self.fGHz[None,None,:]*dist[...,None]/0.3)/dist[...,None]
EP = E*P
return(EP)
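# Usage sketch for AntPosRot (hedged; the file name comes from the show3D example
# above, the position and rotation below are arbitrary illustrations):
#   T = np.eye(3)                                  # antenna frame aligned with global frame
#   ant = AntPosRot('defant.vsh3', p=np.array([0., 0., 1.3]), T=T)
#   EP = ant.field(np.array([[10., 0., 1.3]]))     # complex field at one observation point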
def _gain(Ft,Fp):
""" calculates antenna gain
Parameters
----------
Ft
Fp
    Returns
    -------
    G : np.array(Nt,Np,Nf) dtype:float
        linear gain
        or np.array(Nr,Nf)
    Notes
    -----
    The square root of the gain is simply np.sqrt(G); the efficiency, the half
    power solid angle and the equivalent half power beamwidth are computed from
    G by _efficiency and _hpbw below.
    .. math::
        G(\\theta,\\phi) = |F_{\\theta}|^2 + |F_{\\phi}|^2
"""
G = np.real( Fp * np.conj(Fp)
+ Ft * np.conj(Ft) )
return(G)
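# _gain implements G = |Ft|**2 + |Fp|**2 element-wise, e.g.:
#   G = _gain(Ft, Fp)
#   GdB = 10*np.log10(G)   # dB gain, as used by _hpbw and _dirmax below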
def _hpbw(G,th,ph):
""" half power beamwidth
Parameters
----------
    G : np.array
        linear gain, Nt x Np x Nf
th : np.array
,Nt
ph : np.array
,Np
Returns
-------
ehpbw : effective half power beamwidth
hpster : half power solid angle (steradians)
"""
#
GdB = 10*np.log10(G)
GdBmax = np.max(np.max(GdB,axis=0),axis=0)
dt = th[1]-th[0]
dp = ph[1]-ph[0]
Nt = len(th)
Np = len(ph)
Nf = GdB.shape[2]
hpster = np.zeros(Nf)
ehpbw = np.zeros(Nf)
for k in range(Nf):
U = np.zeros((Nt,Np))
A = GdB[:,:,k]*np.ones(Nt)[:,None]*np.ones(Np)[None,:]
u = np.where(A>(GdBmax[k]-3))
U[u] = 1
V = U*np.sin(th)[:,None]
hpster[k] = np.sum(V)*dt*dp/(4*np.pi)
ehpbw[k] = np.arccos(1-2*hpster[k])
return ehpbw,hpster
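# Sketch of the intended call sequence (shapes as documented above):
#   G = _gain(Ft, Fp)                  # (Nt, Np, Nf)
#   ehpbw, hpster = _hpbw(G, th, ph)   # th (Nt,), ph (Np,), regularly spaced, in radians
# hpster is the fraction of the sphere within 3 dB of the maximum gain and
# ehpbw = arccos(1 - 2*hpster) is the half-angle of a spherical cap covering the
# same solid angle.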
def _efficiency(G,th,ph):
""" determine antenna efficiency
Parameters
----------
    G : np.array
        linear gain, Nt x Np x Nf
th : np.array
,Nt
ph : np.array
,Np
Returns
-------
    efficiency : np.array (,Nf)
"""
#
dt = th[1]-th[0]
dp = ph[1]-ph[0]
Nt = len(th)
Np = len(ph)
Gs = G*np.sin(th)[:,None,None]*np.ones(Np)[None,:,None]
efficiency = np.sum(np.sum(Gs,axis=0),axis=0)*dt*dp/(4*np.pi)
return efficiency
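# _efficiency approximates (1/4pi) * integral of G over the sphere, so for a
# lossless pattern sampled finely enough the returned value should be close to 1:
#   eta = _efficiency(_gain(Ft, Fp), th, ph)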
def _dirmax(G, th, ph, Ft, Fp):
    """ determine information in the Gmax direction
    Parameters
    ----------
    G : np.array
        linear gain, Nt x Np x Nf
    th : np.array
        ,Nt
    ph : np.array
        ,Np
    Ft : np.array
        F_theta component, used for the polarisation state at Gmax
    Fp : np.array
        F_phi component, used for the polarisation state at Gmax
    Returns
    -------
    GdBmax, theta_max, phi_max, (hl, sl, el)
    """
GdB = 10*np.log10(G)
GdBmax = np.max(np.max(GdB,axis=0),axis=0)
umax = np.array(np.where(GdB==GdBmax))[:,0]
theta_max = th[umax[0]]
phi_max = ph[umax[1]]
M = geu.SphericalBasis(np.array([[theta_max,phi_max]]))
sl = M[:,2].squeeze()
uth = M[:,0]
uph = M[:,1]
el = Ft[tuple(umax)]*uth + Fp[tuple(umax)]*uph
eln = el/np.linalg.norm(el)
el = np.abs(eln.squeeze())
hl = np.cross(sl,el)
return GdBmax,theta_max,phi_max,(hl,sl,el)
def F0(nu,sigma):
""" F0 function for horn antenna pattern
Parameters
----------
nu : np.array
(....,nf)
sigma : np.array
(,nf)
Notes
-----
http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf
18.3.2
"""
nuos = nu/sigma
argp = nuos + sigma
argm = nuos - sigma
expf = np.exp(1j*(np.pi/2)*nuos**2)
sf = 1./sigma
sp , cp = fresnel(argp)
sm , cm = fresnel(argm)
Fp = cp-1j*sp
Fm = cm-1j*sm
F = sf*expf*(Fp -Fm)
return F
def F1(nu,sigma):
""" F1 function for horn antenna pattern
http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf
18.3.3
"""
F = 0.5*(F0(nu+0.5,sigma)+F0(nu-0.5,sigma))
return F
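# Hedged usage sketch for F0/F1 (Orfanidis ch. 18; fresnel is assumed to be
# imported from scipy.special elsewhere in this module):
#   nu = np.linspace(-3, 3, 201)
#   f_uniform = F0(nu, sigma=1.2)   # uniform-amplitude aperture term (18.3.2)
#   f_cosine = F1(nu, sigma=1.2)    # cosine-amplitude term built from shifted F0's (18.3.3)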
if (__name__ == "__main__"):
doctest.testmod()
| mit |
Haunter17/MIR_SU17 | exp3/exp3k/exp3k.py | 1 | 23423 | import numpy as np
import tensorflow as tf
import h5py
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
# Functions for initializing neural nets parameters
def init_weight_variable(shape, nameIn):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial, name=nameIn)
def init_bias_variable(shape, nameIn):
initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
return tf.Variable(initial, name=nameIn)
def conv2d(x, W):
return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
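# Shape note for conv2d with 'VALID' padding (stride 1, no zero padding): an input
# of shape [batch, H, W, 1] convolved with a [filter_row, filter_col, 1, k1] kernel
# gives [batch, H - filter_row + 1, W - filter_col + 1, k1]; the softmax layer in
# Model.y_conv sizes its weight matrix with exactly those terms.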
def loadData(filepath):
'''
Load and return four variables from the file with path filepath
X_train: input data for training
y_train: labels for X_train
X_val: input data for validation
y_val: labels for X_val
'''
    print('==> Experiment 3k')
print('==> Loading data from {}'.format(filepath))
# benchmark
t_start = time.time()
# reading data
f = h5py.File(filepath)
X_train = np.array(f.get('trainingFeatures'))
y_train = np.array(f.get('trainingLabels'))
X_val = np.array(f.get('validationFeatures'))
y_val = np.array(f.get('validationLabels'))
t_end = time.time()
print('--Time elapsed for loading data: {t:.2f} \
seconds'.format(t = t_end - t_start))
del f
print('-- Number of training samples: {}'.format(X_train.shape[0]))
print('-- Number of validation samples: {}'.format(X_val.shape[0]))
print('Shape of X_train: %s'%str(X_train.shape))
print('Shape of y_train: %s'%str(y_train.shape))
print('Shape of X_val: %s'%str(X_val.shape))
print('Shape of y_val: %s'%str(y_val.shape))
return [X_train, y_train, X_val, y_val]
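# Usage sketch (the path below is an illustration, not taken from this script):
#   X_train, y_train, X_val, y_val = loadData('/path/to/features_and_labels.mat')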
#self, X_train, y_train, X_val, y_val, num_freq, filter_row, filter_col, k1, k2, learningRate, pooling_strategy):
# set up property that makes it only be set once
# we'll use this to avoid adding tensors to the graph multiple times
import functools
def lazy_property(function):
attribute = '_cache_' + function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
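# Sketch of what lazy_property does (names are illustrative):
#   class Example(object):
#       @lazy_property
#       def graph_piece(self):
#           return build_expensive_tensor()   # body runs once per instance
# The first access stores the result in self._cache_graph_piece, so later accesses
# return the cached tensor instead of adding duplicate ops to the TensorFlow graph.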
class Model:
def __init__(self, num_freq, X_train, y_train, X_val, y_val, filter_row, filter_col, k1, learningRate, debug):
'''
Initializer for the model
'''
# store the data
self.X_train, self.y_train, self.X_val, self.y_val = X_train, y_train, X_val, y_val
# store the parameters sent to init that define our model
self.num_freq, self.filter_row, self.filter_col, self.k1, self.learningRate, self.debug = num_freq, filter_row, filter_col, k1, learningRate, debug
# find num_training_vec, total_features, num_frames, num_classes, and l from the shape of the data
# and store them
self.storeParamsFromData()
# Set-up and store the input and output placeholders
x = tf.placeholder(tf.float32, [None, self.total_features])
y_ = tf.placeholder(tf.float32, [None, self.num_classes])
self.x = x
self.y_ = y_
# Setup and store tensor that performs the one-hot encoding
y_train_OHEnc = tf.one_hot(self.y_train.copy(), self.num_classes)
y_val_OHEnc = tf.one_hot(self.y_val.copy(), self.num_classes)
self.y_train_OHEnc = y_train_OHEnc
self.y_val_OHEnc = y_val_OHEnc
# create each lazy_property
# each lazy_property will add tensors to the graph
self.y_conv
self.cross_entropy
self.train_step
self.accuracy
# properties for use in debugging
if self.debug:
self.grads_and_vars
# print to the user that the network has been set up, along with its properties
print("Setting up Single Conv Layer Neural net with %g x %g filters, k1 = %g, learningRate = %g"%(filter_row, filter_col, k1, learningRate))
def storeParamsFromData(self):
'''
Calculate and store parameters from the raw data
total_features: The number of CQT coefficients total (incldues all context frames)
num_training_vec: The number of training examples in your dataset
num_frames: The number of context frames in each training example (total_features / num_freq)
num_classes: The number of songs we're distinguishing between in our output
l: The length of our second convolutional kernel - for now, its equal to num_frames
'''
# Neural-network model set-up
# calculating some values which will be nice as we set up the model
num_training_vec, total_features = self.X_train.shape
num_frames = int(total_features / self.num_freq)
print('-- Num frames: {}'.format(num_frames))
num_classes = int(max(self.y_train.max(), self.y_val.max()) + 1)
l = num_frames
# store what will be helpful later
self.total_features = total_features
self.num_training_vec = num_training_vec
self.num_frames = num_frames
self.num_classes = num_classes
self.l = l
@lazy_property
def y_conv(self):
# reshape the input into the form of a spectrograph
x_image = tf.reshape(self.x, [-1, self.num_freq, self.num_frames, 1])
x_image = tf.identity(x_image, name="x_image")
# first convolutional layer parameters
W_conv1 = init_weight_variable([self.filter_row, self.filter_col, 1, self.k1], "W_conv1")
b_conv1 = init_bias_variable([self.k1], "b_conv1")
# tensor that computes the output of the first convolutional layer
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_conv1 = tf.identity(h_conv1, name="h_conv_1")
# flatten out the output of the first convolutional layer to pass to the softmax layer
h_conv1_flat = tf.reshape(h_conv1, [-1, (self.num_freq - self.filter_row + 1) * (self.num_frames - self.filter_col + 1) * self.k1])
h_conv1_flat = tf.identity(h_conv1_flat, name="h_conv1_flat")
# softmax layer parameters
W_sm = init_weight_variable([(self.num_freq - self.filter_row + 1) * (self.num_frames - self.filter_col + 1) * self.k1, self.num_classes], "W_sm")
b_sm = init_bias_variable([self.num_classes], "b_sm")
# the output of the layer - un-normalized and without a non-linearity
# since cross_entropy_with_logits takes care of that
y_conv = tf.matmul(h_conv1_flat, W_sm) + b_sm
y_conv = tf.identity(y_conv, name="y_conv")
return y_conv # would want to softmax it to get an actual prediction
@lazy_property
def cross_entropy(self):
'''
Create a tensor that computes the cross entropy cost
Use the placeholder y_ as the labels, with input y_conv
Note that softmax_cross_entropy_with_logits takes care of normalizing
y_conv to make it a probability distribution
This tensor can be accessed using: self.cross_entropy
'''
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
cross_entropy = tf.identity(cross_entropy, name="cross_entropy")
return cross_entropy
@lazy_property
def optimizer(self):
'''
Create a tensor that represents the optimizer. This tensor can
be accessed using: self.optimizer
'''
optimizer = tf.train.AdamOptimizer(learning_rate = self.learningRate)
return optimizer
@lazy_property
def train_step(self):
'''
Creates a tensor that represents a single training step. This tensor
can be passed a feed_dict that has x and y_, and it will compute the gradients
and perform a single step.
This tensor can be accessed using: self.train_step
'''
return self.optimizer.minimize(self.cross_entropy)
@lazy_property
def accuracy(self):
'''
Create a tensor that computes the accuracy, using the placeholder y_ as the labeled data
and y_conv for the predictions of the network.
This tensor can be accessed using: self.accuracy
'''
correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return accuracy
'''
Properties that we'll use for debugging
'''
@lazy_property
def grads_and_vars(self):
grads_and_vars = self.optimizer.compute_gradients(self.cross_entropy, tf.trainable_variables())
return grads_and_vars
def train(self, batch_size, num_epochs, print_freq, debug_out='debug.txt'):
'''
Train the Network on the data that will have been loaded when the NN is initialized
Trained on: self.X_train, and a OH encoding of self.y_train
Trains with batch_size batches for num_epochs epochs
Debugging info is written to debug.txt (can add params to have more places to write out
to)
'''
# Starting an interactive session and initializing the parameters
#sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# replace it with the one-hot encoded one --- should I replace?
y_trainOH = sess.run(self.y_train_OHEnc)[:, 0, :]
y_valOH = sess.run(self.y_val_OHEnc)[:, 0, :]
# lists to record accuracy at several points during training
train_acc_list = []
val_acc_list = []
train_acc_on_batch_list = []
# lists to record the error at several points during training
train_err_list = []
val_err_list = []
train_err_on_batch_list = []
# track which epochs you record data during
epoch_numbers = []
# record the start time
t_start = time.time()
for epoch in range(num_epochs):
epochStart = time.time()
# train by systematically pulling batches of batch_size from
# the training set and taking a training step on each batch
for i in range(0, self.num_training_vec, batch_size):
batch_end_point = min(i + batch_size, self.num_training_vec)
train_batch_data = self.X_train[i : batch_end_point]
train_batch_label = y_trainOH[i : batch_end_point]
self.train_step.run(feed_dict={self.x: train_batch_data, self.y_: train_batch_label})
epochEnd = time.time()
# print and record data now that we've trained on our full training set
if (epoch + 1) % print_freq == 0:
# timing for the measurements of cost and accuracy
evaluationStart = time.time()
# compute training (on the most recent batch and the full data set)
# and validation cost and accuracy, then print them and add them to the list
# we start with accuracy:
                train_acc = self.evalByBatch(self.accuracy, self.X_train, y_trainOH, 5000)
                train_acc_list.append(train_acc)
                val_acc = self.evalByBatch(self.accuracy, self.X_val, y_valOH, 5000)
                val_acc_list.append(val_acc)
                # Now we compute the error on each set:
                train_err = self.evalByBatch(self.cross_entropy, self.X_train, y_trainOH, 5000)
                train_err_list.append(train_err)
                val_err = self.evalByBatch(self.cross_entropy, self.X_val, y_valOH, 5000)
val_err_list.append(val_err)
# keep track of which epochs we have data for
epoch_numbers += [epoch]
# this marks the end of our evaluation
evaluationEnd = time.time()
# print a summary of our NN at this epoch
print("epoch: %d, time (train, evaluation): (%g, %g), t acc, v acc, t cost, v cost: %.5f, %.5f, %.5f, %.5f"%(epoch+1, epochEnd - epochStart, evaluationEnd - evaluationStart, train_acc, val_acc, train_err, val_err))
# debugging print outs
if self.debug:
# print out step / current value ratio for each parameter in our network
# based on training data from the most recent batch
# to the file with name debug_out
self.debug_WriteGradAndVar(train_batch_data, train_batch_label, epoch, debug_out)
# record the total time spent training the neural network
t_end = time.time()
print('--Time elapsed for training for %g epochs: %g'%(num_epochs, t_end - t_start))
# return the lists of logged data
return [train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers]
def evalByBatch(self, toEval, x, y_, batchSize):
weightedAvg = 0.0
for i in range(0, len(x), batchSize):
batch_end_point = min(i + batchSize, len(x))
batch_data = x[i : batch_end_point]
batch_label = y_[i : batch_end_point]
curAmount = toEval.eval(feed_dict={self.x: batch_data, self.y_: batch_label})
# weight by the length of the batch and keep adding on
weightedAvg = weightedAvg + curAmount * float(batch_end_point - i) / len(x)
return weightedAvg
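    # evalByBatch returns sum_i (len(batch_i)/len(x)) * metric(batch_i), a
    # length-weighted average, which equals evaluating the mean-reduced metric
    # (accuracy or cross-entropy here) on the full set in one pass.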
def debug_WriteGradAndVar(self, xDebug, yDebug, epoch, debug_out):
'''
Helper function that prints the ratio of the training step that would be taken
on input data and labels xDebug and yDebug to the magnitude of each parameter
in the network. This gives us a sense of how much each parameter is changing.
Inputs:
xDebug: input data to calculate the gradient from
yDebug: labels for the input data
epoch: the number of the epoch (to print out to the file)
debug_out: the file to write to - if it doesn't exist it will be created
'''
file_object = open(debug_out, 'a+')
# record which epoch this is
file_object.write("Epoch: %d\n"%(epoch))
# find the current learning rate - this will be used with the gradient to find the step size
curLearningRate = self.optimizer._lr
# print each gradient and the variables they are associated with
# the gradients are stored in tuples, where the first element is a tensor
# that computes the gradient, and the second is the parameter that gradient
# is associated with
for gv in self.grads_and_vars:
curGrads = gv[0].eval(feed_dict={self.x: xDebug, self.y_: yDebug})
curSteps = curGrads * curLearningRate # scale down the graident by the learning rate
curVars = gv[1].eval()
# How much, compared to the magnitude of the weight, are we stepping
stepToVarRatio = np.absolute(np.divide(curSteps, curVars))
# print the name of the variable, then all the step ratios (step amount / current value)
# these values will have been averaged across the training examples
curName = gv[1].name
file_object.write("Variable: " + curName + "\n")
for index, step in np.ndenumerate(stepToVarRatio):
file_object.write(str(index) + ": " + str(step) + "\n")
# print summary statistics for this layer
maxVal = np.amax(stepToVarRatio)
thirdQuartile = np.percentile(stepToVarRatio, 75)
mean = np.mean(stepToVarRatio)
median = np.median(stepToVarRatio)
firstQuartile = np.percentile(stepToVarRatio, 25)
minVal = np.amin(stepToVarRatio)
file_object.write("Statistics: (%g, %g, %g, %g, %g, %g)\n"%(minVal, firstQuartile, median, mean, thirdQuartile, maxVal))
file_object.write("---------------------------------------\n")
# close the file
file_object.close()
def makeTrainingPlots(epochs, paramValues, trainingMetricLists, validationMetricLists, paramName, metricName, titles, filenames):
'''
Plots of the given training and validation metrics versus epoch number. One plot per list
in trainingMetricLists and validationMetricLists. Assume there will be the same number of sublists
in both those parameters. Titles will hold a list of strings that will be used for the titles
of the graphs. The last title will be for the plot with all the validation curves. Filenames is a list of filenames to save your plots to
Input:
epochs: a list of the epochs on which data was taken - assume all of them took
data at the same epoch numbers
paramValues: the values of the param that we were varying (to label the curves in our validation plot)
trainingMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
validationMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
paramName: name of the parameter you're varying (e.g. learningRate or kernel height)
metricName: the name of the metric (e.g. accuracy, or cross-entropy error), to be used on the y-axis
titles: titles for the graph (will include info on the params used).
*The last title will be for the validation plot
filename: the filenames to write the graphs to (will include info on the params used)
* the last filename will be for the validation plot
Output:
Write a png file for each list in trainingMetricLists/validationMetricLists with the desired plot
'''
# figure with all the validation curves
validationFig = plt.figure(figsize=(7, 4))
validationPlot = validationFig.add_subplot(111)
# go through each setup and make a plot for each
for i in range(len(trainingMetricLists)):
# pull out the list we're concerned with
trainingMetric = trainingMetricLists[i]
validationMetric = validationMetricLists[i]
# make the figure, add plots, axis lables, a title, and legend
fig = plt.figure(figsize=(7, 4))
myPlot = fig.add_subplot(111)
myPlot.plot(epochs, trainingMetric, '-', label="Training")
myPlot.plot(epochs, validationMetric, '-', label="Validation")
myPlot.set_xlabel("Epoch Number")
myPlot.set_ylabel(metricName)
myPlot.set_title(titles[i])
myPlot.legend(loc="best", frameon=False)
# Write the figure
fig.savefig(filenames[i])
# update the figure with all the validation curves
validationPlot.plot(epochs, validationMetric, '-', label=(paramName + " = " + str(paramValues[i])))
# finish labeling + write the validation plot
validationPlot.set_xlabel("Epoch Number")
validationPlot.set_ylabel(metricName)
validationPlot.set_title(titles[-1])
validationPlot.legend(loc="best", frameon=False)
validationFig.savefig(filenames[-1])
def makeBestResultPlot(paramValues, trainingMetricLists, validationMetricLists, paramName, metricName, title, filename):
'''
Plot the "best" value of the training and validation metric against the param that led to it
Best is assumed to be the largest value of the metric
Input:
trainingMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
validationMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
paramName:
metricName:
title: the title of the graph (will include info on the params used)
filename: the filename to write the graph to (will include info on the params used)
Output:
Write a png file with the desired plot
Is there a way to call the other one to do this? if didn't assume epoch number then yes - oh well
'''
bestTrainingMetrics = [max(curList) for curList in trainingMetricLists]
bestValidationMetrics = [max(curList) for curList in validationMetricLists]
# make the figure, add plots, axis lables, a title, and legend
fig = plt.figure(figsize=(7, 4))
myPlot = fig.add_subplot(111)
myPlot.plot(paramValues, bestTrainingMetrics, '-', label="Training")
myPlot.plot(paramValues, bestValidationMetrics, '-', label="Validation")
myPlot.set_xlabel(paramName)
myPlot.set_ylabel(metricName)
myPlot.set_title(title)
myPlot.legend(loc="best", frameon=False)
# Write the figure
fig.savefig(filename)
def makeEndResultPlot(paramValues, trainingMetricLists, validationMetricLists, paramName, metricName, title, filename):
'''
Plot the final value of the training and validation metric against the param that led to it
Input:
trainingMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
validationMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
paramName:
metricName:
title: the title of the graph (will include info on the params used)
filename: the filename to write the graph to (will include info on the params used)
Output:
Write a png file with the desired plot
Is there a way to call the other one to do this? if didn't assume epoch number then yes - oh well
'''
finalTrainingMetrics = [curList[-1] for curList in trainingMetricLists]
finalValidationMetrics = [curList[-1] for curList in validationMetricLists]
# make the figure, add plots, axis lables, a title, and legend
fig = plt.figure(figsize=(7, 4))
myPlot = fig.add_subplot(111)
myPlot.plot(paramValues, finalTrainingMetrics, '-', label="Training")
myPlot.plot(paramValues, finalValidationMetrics, '-', label="Validation")
myPlot.set_xlabel(paramName)
myPlot.set_ylabel(metricName)
myPlot.set_title(title)
myPlot.legend(loc="best", frameon=False)
# Write the figure
fig.savefig(filename)
'''
Our main: trains single convolutional layer models whose kernel shapes, number
of epochs and output plot name are taken from the command line.
'''
# read in command line parameters
try:
# read in a list of the row numbers for the kernels
filterRowsString = sys.argv[1]
    filterRowsIn = list(map(int, filterRowsString.strip('[]').split(',')))
# read in a list of the col numbers for the kernels
filterColsString = sys.argv[2]
# map it from a string into a list of ints
    filterColsIn = list(map(int, filterColsString.strip('[]').split(',')))
# read in the number of epochs
numEpochs = int(sys.argv[3])
finalPlotName = sys.argv[4]
except Exception as e:
print('-- {}'.format(e))
# filepath to the data you want to laod
filepath = '/pylon2/ci560sp/cstrong/exp3/exp3_taylorswift_d15_1s_C1C8.mat'
# define the configurations we're going to be looking at
# in this exp: just change the number of rows in a vertical kernel
filterCols = filterColsIn
filterRows = filterRowsIn
k1s = [39] * len(filterRows)
learningRates = [0.001] * len(filterRows)
# set training parameters
batchSize = 1000
print_freq = 1
# make lists to store data
train_acc_lists = []
val_acc_lists = []
train_err_lists = []
val_err_lists = []
epoch_number_lists = []
# load data
[X_train, y_train, X_val, y_val] = loadData(filepath)
# loop through the setups and make a model each time
for i in range(len(filterRows)):
# create the model - this will create the TF graph as well as load the data
m = Model(169, X_train, y_train, X_val, y_val, filterRows[i], filterCols[i], k1s[i], learningRates[i], False)
# actually train the model (on the data it already loaded)
[train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers] = m.train(1000, numEpochs, print_freq)
# store the new data
train_acc_lists.append(train_acc_list)
val_acc_lists.append(val_acc_list)
train_err_lists.append(train_err_list)
val_err_lists.append(val_err_list)
epoch_number_lists.append(epoch_numbers)
del m # clear out the model to avoid huge buildup of memory
# printing
print("Filter Rows: %s"%(filterRows))
print("Filter Cols: %s"%(filterCols))
print("K1s: %s"%(k1s))
print("Learning Rates: %s"%(learningRates))
print("Train acc list: %s"%(str(train_acc_lists)))
print("Val acc list: %s"%(str(val_acc_lists)))
print("Train err list: %s"%(str(train_err_lists)))
print("Val err list: %s"%(str(val_err_lists)))
print("Epoch number lists: %s"%(str(epoch_number_lists)))
bestValues = [min(curList) for curList in val_err_lists]
print("Best Values: %s"%str(bestValues))
# plotting
trainingPlotTitles = ['Single Layer CNN with %gx%g kernels and k1=%g, LR=%f'%(filterRows[i], filterCols[i], k1s[i], learningRates[i]) for i in range(len(filterRows))]
trainingPlotTitles.append('Exp 3k, Validation Cross-Entropy Cost vs. Epoch')
trainingPlotFiles = ['exp3k_training_%gx%g_k1=%g_LR=%f_%gEpochs.png'%(filterRows[i], filterCols[i], k1s[i], learningRates[i], numEpochs) for i in range(len(filterRows))]
trainingPlotFiles.append('exp3k_validationCurves_%gEpochs'%(numEpochs))
makeTrainingPlots(epoch_number_lists[0], zip(filterRows, filterCols), train_err_lists, val_err_lists, "Shape of Kernel", "Cross Entropy Cost", trainingPlotTitles, trainingPlotFiles)
| mit |
roxyboy/bokeh | bokeh/charts/builder/tests/test_bar_builder.py | 33 | 6390 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
import pandas as pd
from bokeh.charts import Bar
from bokeh.charts.builder.tests._utils import create_chart
from bokeh.models import Range1d, FactorRange
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestBar(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python'] = [2, 5]
xyvalues['pypy'] = [12, 40]
xyvalues['jython'] = [22, 30]
for i, _xy in enumerate([xyvalues,
dict(xyvalues),
pd.DataFrame(xyvalues)]):
bar = create_chart(Bar, _xy)
builder = bar._builders[0]
np.testing.assert_array_equal(builder._data['pypy'], np.array(xyvalues['pypy']))
np.testing.assert_array_equal(builder._data['python'], np.array(xyvalues['python']))
np.testing.assert_array_equal(builder._data['jython'], np.array(xyvalues['jython']))
# test mid values, that should always be y/2 ..
np.testing.assert_array_equal(builder._data['midpython'], np.array([1, 2.5]))
np.testing.assert_array_equal(builder._data['midpypy'], np.array([6, 20]))
np.testing.assert_array_equal(builder._data['midjython'], np.array([11, 15]))
# stacked values should be 0 as base and + y/2 of the column
# skipping plain dict case as stacked values randomly fails due to
# dictionary unordered nature
if i != 1:
np.testing.assert_array_equal(builder._data['stackedpython'], np.array([1, 2.5]))
np.testing.assert_array_equal(builder._data['stackedpypy'], np.array([8, 25]))
np.testing.assert_array_equal(builder._data['stackedjython'], np.array([25, 60]))
np.testing.assert_array_equal(builder._data['cat'], np.array(['0', '1']))
np.testing.assert_array_equal(builder._data['width'], np.array([0.8, 0.8]))
np.testing.assert_array_equal(builder._data['width_cat'], np.array([0.2, 0.2]))
lvalues = [[2, 5], [12, 40], [22, 30]]
for i, _xy in enumerate([lvalues, np.array(lvalues)]):
bar = create_chart(Bar, _xy)
builder = bar._builders[0]
np.testing.assert_array_equal(builder._data['0'], np.array(lvalues[0]))
np.testing.assert_array_equal(builder._data['1'], np.array(lvalues[1]))
np.testing.assert_array_equal(builder._data['2'], np.array(lvalues[2]))
# test mid values, that should always be y/2 ..
np.testing.assert_array_equal(builder._data['mid0'], np.array([1, 2.5]))
np.testing.assert_array_equal(builder._data['mid1'], np.array([6, 20]))
np.testing.assert_array_equal(builder._data['mid2'], np.array([11, 15]))
# stacked values should be 0 as base and + y/2 of the column
np.testing.assert_array_equal(builder._data['stacked0'], np.array([1, 2.5]))
np.testing.assert_array_equal(builder._data['stacked1'], np.array([8, 25]))
np.testing.assert_array_equal(builder._data['stacked2'], np.array([25, 60]))
np.testing.assert_array_equal(builder._data['cat'], np.array(['0', '1']))
np.testing.assert_array_equal(builder._data['width'], np.array([0.8, 0.8]))
np.testing.assert_array_equal(builder._data['width_cat'], np.array([0.2, 0.2]))
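    # The expected values above follow the builder's layout rule per category:
    # mid<label> = y/2 and stacked<label> = (sum of the previous labels) + y/2,
    # e.g. category '0': python 2 -> 1, pypy 12 -> 2 + 12/2 = 8, jython 22 -> 14 + 22/2 = 25.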
def test_all_positive_input(self):
source = OrderedDict()
source['percent change 1'] = [1, 13]
source['percent change 2'] = [12, 40]
bar_chart = create_chart(Bar, source)
self.assertEqual(bar_chart._builders[0].y_range.start, 0)
self.assertEqual(bar_chart._builders[0].y_range.end, 40 * 1.1)
def test_all_negative_input(self):
source = OrderedDict()
source['percent change 1'] = [-1, -13]
source['percent change 2'] = [-12, -40]
bar_chart = create_chart(Bar, source)
        # We want the start to be negative, so that the bars point downwards
self.assertEqual(bar_chart._builders[0].y_range.start, -40 * 1.1)
self.assertEqual(bar_chart._builders[0].y_range.end, 0)
def test_mixed_sign_input(self):
source = OrderedDict()
source['percent change 1'] = [-1, -13]
source['percent change 2'] = [12, 40]
bar_chart = create_chart(Bar, source)
self.assertEqual(bar_chart._builders[0].y_range.start, -13 * 1.1)
self.assertEqual(bar_chart._builders[0].y_range.end, 40 * 1.1)
def test_set_custom_continuous_range(self):
# Users can specify their own y_range for cases where the
# default guess is not what's desired.
source = OrderedDict()
source['percent change 1'] = [25, -13]
source['percent change 2'] = [-12, -40]
custom_y_range = Range1d(50, -50)
bar_chart = create_chart(Bar, source, continuous_range=custom_y_range)
self.assertEqual(bar_chart._builders[0].y_range, custom_y_range)
def test_invalid_continuous_range_raises_error(self):
source = OrderedDict({'p': [0, 1]})
bad_y_range = range(0, 50) # Not a Range object
with self.assertRaises(ValueError):
create_chart(Bar, source, continuous_range=bad_y_range)
def test_non_range1d_continuous_range_raises_value_error(self):
source = OrderedDict({'p': [0, 1]})
non_1d_range = FactorRange(factors=['a', 'b'])
with self.assertRaises(ValueError):
create_chart(Bar, source, continuous_range=non_1d_range)
| bsd-3-clause |
georgek/KAT | scripts/kat_plot_density.py | 1 | 5902 | #!/usr/bin/env python3
import argparse
import numpy as np
import scipy.ndimage as ndimage
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import kat_plot_colormaps as cmaps
from kat_plot_misc import *
# ----- command line parsing -----
parser = argparse.ArgumentParser(
description="""Create K-mer Density Plots.
Creates a scatter plot, where the density or "heat" at each point represents
the number of distinct K-mers at that point. Typically this is used to
visualise a matrix produced by the "kat comp" tool to compare multiplicities
from two K-mer hashes produced by different NGS reads, or to visualise the GC
vs K-mer multiplicity matrices produced by the "kat gcp" tool.""")
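# A minimal invocation sketch (the matrix and output names here are made-up
# examples, not values used elsewhere; the flags mirror the options defined
# below):
#   kat_plot_density.py -o gcp-density -x 200 --contours smooth sample.mx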
parser.add_argument("matrix_file", type=str,
help="The input matrix file from KAT")
parser.add_argument("-o", "--output", type=str, default="kat-density",
help="The path to the output file.")
parser.add_argument("-p", "--output_type", type=str,
help="The plot file type to create (default is based on " \
"given output name).")
parser.add_argument("-t", "--title", type=str,
help="Title for plot")
parser.add_argument("-a", "--x_label", type=str,
help="Label for x-axis")
parser.add_argument("-b", "--y_label", type=str,
help="Label for y-axis")
parser.add_argument("-c", "--z_label", type=str,
help="Label for z-axis")
parser.add_argument("-x", "--x_max", type=int,
help="Maximum value for x-axis")
parser.add_argument("-y", "--y_max", type=int,
help="Maximum value for y-axis")
parser.add_argument("-z", "--z_max", type=int,
help="Maximum value for z-axis")
parser.add_argument("-w", "--width", type=int, default=8,
help="Width of canvas")
parser.add_argument("-l", "--height", type=int, default=6,
help="Height of canvas")
parser.add_argument("--contours", choices=["none", "normal", "smooth"],
default="normal")
parser.add_argument("--not_rasterised", dest="rasterised",
action="store_false",
help="Don't rasterise graphics (slower).")
parser.add_argument("--dpi", type=int, default=300,
help="Resolution in dots per inch of output graphic.")
parser.set_defaults(rasterised=True)
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true",
help="Print extra information")
parser.set_defaults(verbose=False)
args = parser.parse_args()
# ----- end command line parsing -----
# load header information
input_file = open(args.matrix_file)
header = readheader(input_file)
if args.title is not None:
title = args.title
elif "Title" in header:
title = header["Title"]
else:
title = "Density Plot"
if args.x_label is not None:
x_label = args.x_label
elif "XLabel" in header:
x_label = header["XLabel"]
else:
x_label = "X"
if args.y_label is not None:
y_label = args.y_label
elif "YLabel" in header:
y_label = header["YLabel"]
else:
y_label = "Y"
if args.z_label is not None:
z_label = args.z_label
elif "ZLabel" in header:
z_label = header["ZLabel"]
else:
z_label = "Z"
matrix = np.loadtxt(input_file)
if "Transpose" in header and header["Transpose"] == '1':
matrix = np.transpose(matrix)
input_file.close()
if args.verbose:
print("{:d} by {:d} matrix file loaded.".format(matrix.shape[0],
matrix.shape[1]))
if args.contours == "smooth":
matrix_smooth = ndimage.gaussian_filter(matrix, sigma=2.0, order=0)
if args.x_max is None or args.y_max is None or args.z_max is None:
# find peaks
msum = np.sum(matrix)
xsums = np.sum(matrix, 0)
ysums = np.sum(matrix, 1)
peakx = findpeaks(xsums)
peaky = findpeaks(ysums)
# ignore peaks at 1
peakx = peakx[peakx != 1]
peaky = peaky[peaky != 1]
peakz = matrix[peaky,:][:,peakx]
# peakxv = xsums[peakx]
# print "peakxv: ", peakxv
# xmax = np.max(peakx[peakxv > (msum * 0.0005)]) * 2
# peakyv = ysums[peaky]
# print "peakyv: ", peakyv
# ymax = np.max(peaky[peakyv > (msum * 0.0005)]) * 2
xmax = len(xsums)
ymax = len(ysums)
for i in range(1, len(xsums), int(len(xsums)/40) + 1):
if np.sum(xsums[:i]) >= msum * 0.995:
xmax = i
break
for i in range(1, len(ysums), int(len(ysums)/40) + 1):
if np.sum(ysums[:i]) >= msum * 0.995:
ymax = i
break
zmax = np.max(peakz) * 1.1
if args.verbose:
print("Automatically detected axis limits:")
print("xmax: ", xmax)
print("ymax: ", ymax)
print("zmax: ", zmax)
if args.x_max is not None:
xmax = args.x_max
if args.y_max is not None:
ymax = args.y_max
if args.z_max is not None:
zmax = args.z_max
plt.figure(num = None, figsize=(args.width, args.height))
pcol = plt.pcolormesh(matrix, vmin=0, vmax=zmax, cmap=cmaps.viridis,
rasterized=args.rasterised)
plt.axis([0,xmax,0,ymax])
cbar = plt.colorbar()
cbar.set_label(wrap(z_label))
cbar.solids.set_rasterized(args.rasterised)
if zmax > 0:
levels = np.arange(zmax/8, zmax, zmax/8)
if args.contours == "normal":
plt.contour(matrix, colors="white", alpha=0.6, levels=levels)
elif args.contours == "smooth":
plt.contour(matrix_smooth, colors="white", alpha=0.6, levels=levels)
title = plt.title(wrap(title))
plt.xlabel(wrap(x_label))
plt.ylabel(wrap(y_label))
plt.grid(True, color="white", alpha=0.2)
plt.tight_layout()
if args.output_type is not None:
output_name = args.output + '.' + args.output_type
else:
output_name = args.output
plt.savefig(correct_filename(output_name), dpi=args.dpi)
| gpl-3.0 |
Philip-Bachman/ICML-2015 | utils.py | 1 | 11903 | """ This file contains different utility functions that are not connected
in any way to the networks presented in the tutorials, but rather help in
processing the outputs into a more understandable way.
For example ``tile_raster_images`` helps in generating an easy-to-grasp
image from a set of samples or weights.
"""
import numpy as np
import PIL as PIL
# Stuff for visualizing diagnostics
from sklearn.neighbors import KernelDensity
import matplotlib as mpl
mpl.use('Agg')
class batch(object):
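    # Decorator class: wraps a function f(t, X) so that X is processed in
    # fixed-size minibatches; the last batch is zero-padded up to batch_size
    # and the padded rows are dropped from the returned results.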
def __init__(self,batch_size):
self.batch_size = batch_size
def __call__(self,f):
def wrapper(t,X):
X = np.array(X)
p = 0
rem = 0
results = []
while p < len(X):
Z = X[p:p+self.batch_size]
if Z.shape[0] != self.batch_size:
zeros = np.zeros((self.batch_size-len(Z),X.shape[1]))
rem = len(Z)
Z = np.array(np.vstack((Z,zeros)),dtype=X.dtype)
temp_results = f(t,Z)
if rem != 0:
temp_results = temp_results[:rem]
results.extend(temp_results)
p += self.batch_size
return np.array(results,dtype='float32')
return wrapper
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape=None, tile_shape=None, tile_spacing=(0, 0),
scale=True,
output_pixel_vals=True,
colorImg=False):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
"""
X = X * 1.0 # converts ints to floats
if colorImg:
channelSize = X.shape[1]/3
X = (X[:,0:channelSize], X[:,channelSize:2*channelSize], X[:,2*channelSize:3*channelSize], None)
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0] + tile_spacing[0]) * tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1] + tile_spacing[1]) * tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
out_array[:, :, i] = np.zeros(out_shape,
dtype='uint8' if output_pixel_vals else out_array.dtype
) + channel_defaults[i]
if i < 3:
print('WHY AM I HERE (utils.py line 101)?')
else:
# use a recursive call to compute the channel and store it
# in the output
xi = X[i]
if scale:
# shift and scale this channel to be in [0...1]
xi = (X[i] - X[i].min()) / (X[i].max() - X[i].min())
out_array[:, :, i] = tile_raster_images(xi, img_shape, tile_shape, tile_spacing, False, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
out_array = np.zeros(out_shape, dtype='uint8' if output_pixel_vals else X.dtype)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
if scale:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
tmp = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
this_img = scale_to_unit_interval(tmp)
else:
this_img = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
# add the slice to the corresponding position in the
# output array
out_array[
tile_row * (H+Hs): tile_row * (H + Hs) + H,
tile_col * (W+Ws): tile_col * (W + Ws) + W
] \
= this_img * (255 if output_pixel_vals else 1)
return out_array
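# A usage sketch for tile_raster_images (illustrative only; the shapes and the
# output file name below are assumptions, not values used in this module):
#   flat = np.random.rand(100, 28 * 28)            # 100 flattened 28x28 images
#   tiled = tile_raster_images(X=flat, img_shape=(28, 28),
#                              tile_shape=(10, 10), tile_spacing=(1, 1))
#   PIL.Image.fromarray(tiled).save('tiled_preview.png')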
def visualize(EN, proto_key, layer_num, file_name):
W = EN.proto_nets[proto_key][layer_num].W.get_value(borrow=True).T
size = int(np.sqrt(W.shape[1]))
# hist(W.flatten(),bins=50)
image = PIL.Image.fromarray(tile_raster_images(X=W, \
img_shape=(size, size), tile_shape=(10,W.shape[0]/10),tile_spacing=(1, 1)))
image.save(file_name)
return
def visualize_net_layer(net_layer, file_name, colorImg=False, \
use_transpose=False, transform=None):
W = net_layer.W.get_value(borrow=False).T
if use_transpose:
W = net_layer.W.get_value(borrow=False)
if not (transform is None):
W = transform(W)
if colorImg:
size = int(np.sqrt(W.shape[1] / 3.0))
else:
size = int(np.sqrt(W.shape[1]))
num_rows = 10
num_cols = int((W.shape[0] / num_rows) + 0.999)
img_shape = (size, size)
tile_shape = (num_rows, num_cols)
image = tile_raster_images(X=W, img_shape=img_shape, tile_shape=tile_shape, \
tile_spacing=(1, 1), scale=True, colorImg=colorImg)
image = PIL.Image.fromarray(image)
image.save(file_name)
return
def visualize_samples(X_samp, file_name, num_rows=10):
d = int(np.sqrt(X_samp.shape[1]))
# hist(W.flatten(),bins=50)
image = PIL.Image.fromarray(tile_raster_images(X=X_samp, img_shape=(d, d), \
tile_shape=(num_rows,X_samp.shape[0]/num_rows),tile_spacing=(1, 1)))
image.save(file_name)
return
# Matrix to image
def mat_to_img(X, file_name, img_shape, num_rows=10, \
scale=True, colorImg=False, tile_spacing=(1,1)):
num_rows = int(num_rows)
num_cols = int((X.shape[0] / num_rows) + 0.999)
tile_shape = (num_rows, num_cols)
# make a tiled image from the given matrix's rows
image = tile_raster_images(X=X, img_shape=img_shape, \
tile_shape=tile_shape, tile_spacing=tile_spacing, \
scale=scale, colorImg=colorImg)
# convert to a standard image format and save to disk
image = PIL.Image.fromarray(image)
image.save(file_name)
return
def plot_kde_histogram(X, f_name, bins=25):
"""
Plot KDE-smoothed histogram of the data in X. Assume data is univariate.
"""
import matplotlib.pyplot as plt
X_samp = X.ravel()[:,np.newaxis]
X_min = np.min(X_samp)
X_max = np.max(X_samp)
X_range = X_max - X_min
sigma = X_range / float(bins)
plot_min = X_min - (X_range/3.0)
plot_max = X_max + (X_range/3.0)
plot_X = np.linspace(plot_min, plot_max, 1000)[:,np.newaxis]
# make a kernel density estimator for the data in X
kde = KernelDensity(kernel='gaussian', bandwidth=sigma).fit(X_samp)
# make a figure
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(plot_X, np.exp(kde.score_samples(plot_X)))
fig.savefig(f_name, dpi=None, facecolor='w', edgecolor='w', \
orientation='portrait', papertype=None, format=None, \
transparent=False, bbox_inches=None, pad_inches=0.1, \
frameon=None)
plt.close(fig)
return
def plot_kde_histogram2(X1, X2, f_name, bins=25):
"""
Plot KDE-smoothed histogram of the data in X1/X2. Assume data is 1D.
"""
import matplotlib.pyplot as plt
# make a figure and configure an axis
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hold(True)
for (X, style) in [(X1, '-'), (X2, '--')]:
X_samp = X.ravel()[:,np.newaxis]
X_min = np.min(X_samp)
X_max = np.max(X_samp)
X_range = X_max - X_min
sigma = X_range / float(bins)
plot_min = X_min - (X_range/3.0)
plot_max = X_max + (X_range/3.0)
plot_X = np.linspace(plot_min, plot_max, 1000)[:,np.newaxis]
# make a kernel density estimator for the data in X
kde = KernelDensity(kernel='gaussian', bandwidth=sigma).fit(X_samp)
ax.plot(plot_X, np.exp(kde.score_samples(plot_X)), linestyle=style)
fig.savefig(f_name, dpi=None, facecolor='w', edgecolor='w', \
orientation='portrait', papertype=None, format=None, \
transparent=False, bbox_inches=None, pad_inches=0.1, \
frameon=None)
plt.close(fig)
return
def plot_stem(x, y, f_name):
"""
Plot a stem plot.
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
fig.savefig(f_name, dpi=None, facecolor='w', edgecolor='w', \
orientation='portrait', papertype=None, format=None, \
transparent=False, bbox_inches=None, pad_inches=0.1, \
frameon=None)
plt.close(fig)
return
def plot_line(x, y, f_name):
"""
Plot a line plot.
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
fig.savefig(f_name, dpi=None, facecolor='w', edgecolor='w', \
orientation='portrait', papertype=None, format=None, \
transparent=False, bbox_inches=None, pad_inches=0.1, \
frameon=None)
plt.close(fig)
return
def plot_scatter(x, y, f_name, x_label=None, y_label=None):
"""
Plot a scatter plot.
"""
import matplotlib.pyplot as plt
if x_label is None:
x_label = 'Posterior KLd'
if y_label is None:
y_label = 'Expected Log-likelihood'
fig = plt.figure()
ax = fig.add_subplot(111)
box = ax.get_position()
ax.set_position([box.x0+(0.05*box.width), box.y0+(0.05*box.height), 0.96*box.width, 0.96*box.height])
ax.set_xlabel(x_label, fontsize=22)
ax.set_ylabel(y_label, fontsize=22)
ax.hold(True)
ax.scatter(x, y, s=24, alpha=0.5, c=u'b', marker=u'o')
plt.sca(ax)
x_locs, x_labels = plt.xticks()
plt.xticks(x_locs, fontsize=18)
y_locs, y_labels = plt.yticks()
plt.yticks(y_locs, fontsize=18)
fig.savefig(f_name, dpi=None, facecolor='w', edgecolor='w', \
orientation='portrait', papertype=None, format='png', \
transparent=False, bbox_inches=None, pad_inches=0.1, \
frameon=None)
plt.close(fig)
return
| mit |
xujun10110/AIL-framework | bin/Repartition_graph.py | 2 | 2494 | #!/usr/bin/python2.7
# -*-coding:UTF-8 -*
import redis
import argparse
import ConfigParser
from pubsublogger import publisher
import matplotlib.pyplot as plt
def main():
"""Main Function"""
# CONFIG #
cfg = ConfigParser.ConfigParser()
cfg.read('./packages/config.cfg')
# SCRIPT PARSER #
parser = argparse.ArgumentParser(
description='''This script is a part of the Analysis Information Leak framework.''',
epilog='''''')
parser.add_argument('-f', type=str, metavar="filename", default="figure",
help='The absolute path name of the "figure.png"',
action='store')
args = parser.parse_args()
# REDIS #
r_serv = redis.StrictRedis(
host=cfg.get("Redis_Level_DB_Hashs", "host"),
port=cfg.getint("Redis_Level_DB_Hashs", "port"),
db=cfg.getint("Redis_Level_DB_Hashs", "db"))
# LOGGING #
publisher.port = 6380
publisher.channel = "Graph"
# FUNCTIONS #
publisher.info("""Creating the Repartition Graph""")
total_list = []
codepad_list = []
pastie_list = []
pastebin_list = []
for hash in r_serv.keys():
total_list.append(r_serv.scard(hash))
code = 0
pastie = 0
pastebin = 0
for paste in r_serv.smembers(hash):
source = paste.split("/")[5]
if source == "codepad.org":
code = code + 1
elif source == "pastie.org":
pastie = pastie + 1
elif source == "pastebin.com":
pastebin = pastebin + 1
codepad_list.append(code)
pastie_list.append(pastie)
pastebin_list.append(pastebin)
codepad_list.sort(reverse=True)
pastie_list.sort(reverse=True)
pastebin_list.sort(reverse=True)
total_list.sort(reverse=True)
plt.plot(codepad_list, 'b', label='Codepad.org')
plt.plot(pastebin_list, 'g', label='Pastebin.com')
plt.plot(pastie_list, 'y', label='Pastie.org')
plt.plot(total_list, 'r', label='Total')
plt.xscale('log')
plt.xlabel('Hashs')
plt.ylabel('Occur[Hash]')
plt.title('Repartition')
plt.legend()
plt.grid()
plt.tight_layout()
plt.savefig(args.f+".png", dpi=None, facecolor='w', edgecolor='b',
orientation='portrait', papertype=None, format="png",
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
if __name__ == "__main__":
main()
| agpl-3.0 |
rexshihaoren/scikit-learn | examples/applications/face_recognition.py | 15 | 5394 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
                     precision    recall  f1-score   support
  Gerhard_Schroeder       0.91      0.75      0.82        28
    Donald_Rumsfeld       0.84      0.82      0.83        33
         Tony_Blair       0.65      0.82      0.73        34
       Colin_Powell       0.78      0.88      0.83        58
      George_W_Bush       0.93      0.86      0.90       129
        avg / total       0.86      0.84      0.85       282
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
IshankGulati/scikit-learn | sklearn/neighbors/graph.py | 36 | 6650 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self):
"""Return the query based on include_self param"""
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self : bool, default=False.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self : bool, default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
hainm/open-forcefield-group | nmr/code/analyze_dcd.py | 2 | 2427 | import os
import itertools
import numpy as np
import mdtraj
import pandas as pd
import statsmodels.api as sm
from patsy import dmatrices
cos = np.cos
sin = np.sin
ave = lambda x: x.mean(0).mean(0)
phi0 = np.deg2rad(0.0)
amino_acids = ["A" , "C" , "D" , "E" , "F" , "G" , "H" , "I" , "K" , "L" , "M" , "N" , "Q" , "R" , "S" , "T" , "V" , "W" , "Y"]
labels = ["%s%s" % (a0,a1) for (a0, a1) in itertools.product(amino_acids, repeat=2)]
bad_pairs = ["AI","AY","CV","CY","DW","EF","EW","FA","FC","FI","FM","FN","FQ","FT","FY","IF","IM","IT","IV","IW","IY","LF","LI","LV","LW","MF","MI","ML","MM","MY","NY","QF","QY","SF","SI","TF","TI","TW","VF","VI","VV","VW","WA","WC","WF","WI","WL","WN","WV","YF","YI","YL","YM","YN","YV"]
bad_pairs.extend(["HH", "TH", "KH"])
bad_pairs.extend(["DI", "DK"])
bad_pairs.extend(["AD","CD","DD","FD","KD","LD","ND","QD","RD","TD","VD","WD","YD","EC","EH","SE","LE"])
bad_pairs.extend(["G%s" % aa for aa in amino_acids])
bad_pairs.extend(["%sG" % aa for aa in amino_acids])
labels = list(set(labels).difference(set(bad_pairs)))
small = pd.read_csv("/home/kyleb/src/tjlane/scalar-couplings/kyleb/smaller_couplings.csv", index_col=0)
large = pd.read_csv("/home/kyleb/src/tjlane/scalar-couplings/kyleb/larger_couplings.csv", index_col=0)
averaged = 0.5 * (small + large)
data = pd.DataFrame(index=labels, columns=["expt", "C1", "C2", "S1", "S2", "CS"], dtype='float')
for label in labels:
a0, a1 = label
if not os.path.exists("/home/kyleb/dat/peptides/dcd/%s-capped.dcd" % (label)):
continue
traj = mdtraj.load("/home/kyleb/dat/peptides/dcd/%s-capped.dcd" % (label), top="/home/kyleb/dat/peptides/raw/%s-capped.pdb" % (label))
rid, indices = mdtraj.geometry.atom_sequence_finder(traj, ["H","N", "CA", "HA"], residue_offsets=[0, 0, 0, 0])
phi = mdtraj.geometry.dihedral.compute_dihedrals(traj, indices)
phi = mdtraj.geometry.dihedral.compute_phi(traj)[1]
data["C1"][label] = ave(cos(phi + phi0))
data["C2"][label] = ave(cos(phi + phi0) ** 2.)
data["S1"][label] = ave(sin(phi + phi0))
data["S2"][label] = ave(sin(phi + phi0) ** 2.)
data["CS"][label] = ave(sin(phi + phi0) * cos(phi + phi0))
data["expt"][label] = averaged[a1][a0]
data = data.dropna(axis=0)
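# Ordinary least squares fit of the ensemble-averaged trigonometric features
# (<cos phi>, <cos^2 phi>, <sin phi>) to the measured couplings -- a
# Karplus-style model, assuming that is the intent of the 'expt ~ C1 + C2 + S1'
# formula below.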
y, X = dmatrices('expt ~ C1 + C2 + S1', data=data, return_type='dataframe')
model = sm.OLS(y, X)
results = model.fit()
print results.summary()
data["yhat"] = results.predict()
data["delta"] = data.expt - data.yhat
rms = (data.delta ** 2.).mean() ** 0.5
rms
| gpl-2.0 |
RedhawkSDR/integration-gnuhawk | gnuradio/gr-filter/examples/fir_filter_ccc.py | 13 | 3154 | #!/usr/bin/env python
import sys
from gnuradio import gr, filter
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = gr.vector_sink_c()
self.vsnk_out = gr.vector_sink_c()
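        # Flowgraph: noise source -> head (N samples) -> vsnk_src (unfiltered
        # reference), and head -> FIR low-pass/decimating filter -> vsnk_out.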
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
andrewnc/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
henridwyer/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
dud225/incubator-airflow | airflow/www/views.py | 1 | 84314 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import pkg_resources
import socket
import importlib
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
from itertools import chain, product
from past.utils import old_div
from past.builtins import basestring
import inspect
import subprocess
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_
from flask import redirect, url_for, request, Markup, Response, current_app, render_template
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_login import flash
from flask._compat import PY2
import jinja2
import markdown
import json
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom
from airflow.operators import BaseOperator, SubDagOperator
from airflow.utils.logging import LoggingMixin
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils import logging as log_utils
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
DEFAULT_SENSITIVE_VARIABLE_FIELDS = (
'password',
'secret',
'passwd',
'authorization',
'api_key',
'apikey',
'access_token',
)
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
url = url_for(
'airflow.graph',
dag_id=m.dag_id)
return Markup(
'<a href="{url}">{m.dag_id}</a>'.format(**locals()))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
url = url_for(
'airflow.task',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=m.dag_id,
root=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{m.task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
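    # Render obj as syntax-highlighted HTML: strings directly, lists item by
    # item and dicts entry by entry, each prefixed with a small header div.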
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
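    # Walk tasks recursively, expanding SubDagOperators: collect task ids and
    # sub-DAG ids, and map each task id to the DAG that owns it.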
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def should_hide_value_for_key(key_name):
return any(s in key_name for s in DEFAULT_SENSITIVE_VARIABLE_FIELDS) \
and conf.getboolean('admin', 'hide_sensitive_variable_fields')
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
from airflow import macros
args.update(request_dict)
args['macros'] = macros
sql = jinja2.Template(chart.sql).render(**args)
label = jinja2.Template(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
import pandas as pd
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart_type == "datatable":
chart.show_datatable = True
if chart.show_datatable:
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
except Exception as e:
raise AirflowException(str(e))
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
series = []
colorAxis = None
if chart_type == 'datatable':
payload['data'] = data
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
elif chart_type == 'para':
df.rename(columns={
df.columns[0]: 'name',
df.columns[1]: 'group',
}, inplace=True)
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
elif chart_type == 'heatmap':
color_perc_lbound = float(
request.args.get('color_perc_lbound', 0))
color_perc_rbound = float(
request.args.get('color_perc_rbound', 1))
color_scheme = request.args.get('color_scheme', 'blue_red')
if color_scheme == 'blue_red':
stops = [
[color_perc_lbound, '#00D1C1'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#FFFFCC'
],
[color_perc_rbound, '#FF5A5F']
]
elif color_scheme == 'blue_scale':
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_rbound, '#2222FF']
]
elif color_scheme == 'fire':
diff = float(color_perc_rbound - color_perc_lbound)
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_lbound + 0.33*diff, '#FFFF00'],
[color_perc_lbound + 0.66*diff, '#FF0000'],
[color_perc_rbound, '#000000']
]
else:
stops = [
[color_perc_lbound, '#FFFFFF'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#888888'
],
[color_perc_rbound, '#000000'],
]
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
data = []
for row in df.itertuples():
data.append({
'x': row[2],
'y': row[3],
'value': row[4],
})
x_format = '{point.x:%Y-%m-%d}' \
if chart.x_is_date else '{point.x}'
series.append({
'data': data,
'borderWidth': 0,
'colsize': 24 * 36e5,
'turboThreshold': sys.float_info.max,
'tooltip': {
'headerFormat': '',
'pointFormat': (
df.columns[1] + ': ' + x_format + '<br/>' +
df.columns[2] + ': {point.y}<br/>' +
df.columns[3] + ': <b>{point.value}</b>'
),
},
})
colorAxis = {
'stops': stops,
'minColor': '#FFFFFF',
'maxColor': '#000000',
'min': 50,
'max': 2200,
}
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
for col in df.columns:
series.append({
'name': col,
'data': [
(k, df[col][k])
for k in df[col].keys()
if not np.isnan(df[col][k])]
})
series = [serie for serie in sorted(
series, key=lambda s: s['data'][0][1], reverse=True)]
if chart_type == "stacked_area":
stacking = "normal"
chart_type = 'area'
elif chart_type == "percent_area":
stacking = "percent"
chart_type = 'area'
else:
stacking = None
hc = {
'chart': {
'type': chart_type
},
'plotOptions': {
'series': {
'marker': {
'enabled': False
}
},
'area': {'stacking': stacking},
},
'title': {'text': ''},
'xAxis': {
'title': {'text': xaxis_label},
'type': 'datetime' if chart.x_is_date else None,
},
'yAxis': {
'title': {'text': yaxis_label},
},
'colorAxis': colorAxis,
'tooltip': {
'useHTML': True,
'backgroundColor': None,
'borderWidth': 0,
},
'series': series,
}
if chart.y_log_scale:
hc['yAxis']['type'] = 'logarithmic'
hc['yAxis']['minorTickInterval'] = 0.1
if 'min' in hc['yAxis']:
del hc['yAxis']['min']
payload['state'] = 'SUCCESS'
payload['hc'] = hc
payload['data'] = data
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
if chart.chart_type == 'para':
return self.render('airflow/para/para.html', chart=chart)
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/highchart.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
#@login_required
def dag_stats(self):
states = [
State.SUCCESS,
State.RUNNING,
State.FAILED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
State.QUEUED,
]
task_ids = []
dag_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
if not dag.is_subdag:
dag_ids.append(dag.dag_id)
TI = models.TaskInstance
DagRun = models.DagRun
session = Session()
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.filter(DagRun.state == State.RUNNING)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
qry = (
session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
.outerjoin(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date)
)
.outerjoin(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date)
)
.filter(TI.task_id.in_(task_ids))
.filter(TI.dag_id.in_(dag_ids))
.filter(or_(
RunningDagRun.c.dag_id != None,
LastDagRun.c.dag_id != None
))
.group_by(TI.dag_id, TI.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
m = importlib.import_module(dag.module_name)
code = inspect.getsource(m)
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/sandbox')
@login_required
def sandbox(self):
title = "Sandbox Suggested Configuration"
cfg_loc = conf.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {
'headers': {k: v for k, v in request.headers},
}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
if hasattr(current_user, 'username'):
d['username'] = current_user.username
return wwwutils.json_response(d)
@expose('/pickle_info')
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = os.path.join(BASE_LOG_FOLDER, log_relative)
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
log_loaded = False
if os.path.exists(loc):
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
log_loaded = True
except:
log = "*** Failed to load local log file: {0}.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = os.path.join(
"http://{host}:{WORKER_LOG_SERVER_PORT}/log", log_relative
).format(**locals())
log += "*** Log file isn't local.\n"
log += "*** Fetching here: {url}\n".format(**locals())
try:
import requests
response = requests.get(url)
response.raise_for_status()
log += '\n' + response.text
log_loaded = True
except:
log += "*** Failed to fetch log file from worker.\n".format(
**locals())
if not log_loaded:
# load remote logs
remote_log_base = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
remote_log = os.path.join(remote_log_base, log_relative)
log += '\n*** Reading remote logs...\n'
# S3
if remote_log.startswith('s3:/'):
log += log_utils.S3Log().read(remote_log, return_error=True)
# GCS
elif remote_log.startswith('gs:/'):
log += log_utils.GCSLog().read(remote_log, return_error=True)
# unsupported
elif remote_log:
log += '*** Unsupported remote log location.'
session.commit()
session.close()
if PY2 and not isinstance(log, unicode):
log = log.decode('utf-8')
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = dag.get_task(task_id)
task = copy.copy(task)
task.resolve_template_files()
attributes = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
attributes.append((attr_name, str(attr)))
title = "Task Details"
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
return self.render(
'airflow/task.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
def xcom(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
session = Session()
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
            dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
force = request.args.get('force') == "true"
deps = request.args.get('deps') == "true"
try:
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
executor.start()
executor.queue_task_instance(
ti, force=force, ignore_dependencies=deps)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
MAX_PERIODS = 1000
# Flagging tasks as successful
session = settings.Session()
task_ids = [task_id]
dag_ids = [dag_id]
task_id_to_dag = {
task_id: dag
}
end_date = ((dag.latest_execution_date or datetime.now())
if future else execution_date)
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if recursive:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
if downstream:
relatives = task.get_flat_relatives(upstream=False)
task_ids += [t.task_id for t in relatives]
if recursive:
recurse_tasks(relatives, task_ids, dag_ids, task_id_to_dag)
if upstream:
            relatives = task.get_flat_relatives(upstream=True)
task_ids += [t.task_id for t in relatives]
if recursive:
recurse_tasks(relatives, task_ids, dag_ids, task_id_to_dag)
TI = models.TaskInstance
if dag.schedule_interval == '@once':
dates = [start_date]
else:
dates = dag.date_range(start_date, end_date=end_date)
tis = session.query(TI).filter(
TI.dag_id.in_(dag_ids),
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids)).all()
tis_to_change = session.query(TI).filter(
TI.dag_id.in_(dag_ids),
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids),
TI.state != State.SUCCESS).all()
tasks = list(product(task_ids, dates))
tis_to_create = list(
set(tasks) -
set([(ti.task_id, ti.execution_date) for ti in tis]))
tis_all_altered = list(chain(
[(ti.task_id, ti.execution_date) for ti in tis_to_change],
tis_to_create))
if len(tis_all_altered) > MAX_PERIODS:
flash("Too many tasks at once (>{0})".format(
MAX_PERIODS), 'error')
return redirect(origin)
if confirmed:
for ti in tis_to_change:
ti.state = State.SUCCESS
session.commit()
for task_id, task_execution_date in tis_to_create:
ti = TI(
task=task_id_to_dag[task_id].get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS)
session.add(ti)
session.commit()
session.commit()
session.close()
flash("Marked success on {} task instances".format(
len(tis_all_altered)))
return redirect(origin)
else:
if not tis_all_altered:
flash("No task instances to mark as successful", 'error')
response = redirect(origin)
else:
tis = []
for task_id, task_execution_date in tis_all_altered:
tis.append(TI(
task=task_id_to_dag[task_id].get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS))
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to mark as successful:"),
details=details,)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id==dag.dag_id,
DR.execution_date<=base_date,
DR.execution_date>=min_date)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
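        # Worked illustration (hypothetical numbers, not taken from any real
        # DAG): with 4 root tasks the per-root budget is
        #     5000 / max(1, 4) == 1250.0
        # nodes; once node_count exceeds that, recurse_nodes below only follows
        # tasks it has not visited yet instead of tracing every path.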
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
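            # Sketch of the resulting payload (task ids are made up): the first
            # occurrence of 'task_b' is emitted as
            #     {'name': 'task_b', 'children': [...], ...}
            # while later occurrences use '_children', which the D3 tree
            # renders collapsed until the user expands them.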
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if isinstance(tid, dict) and tid.get("state") == State.RUNNING:
d = datetime.now() - dateutil.parser.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
arrange = request.args.get('arrange', "LR")
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
DR = models.DagRun
drs = (
session.query(DR)
.filter_by(dag_id=dag_id)
.order_by(desc(DR.execution_date)).all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
@wwwutils.action_logging
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
if ti.duration:
data.append([
ti.execution_date.isoformat(),
float(ti.duration) / (60*60)
])
if data:
all_data.append({'data': data, 'name': task.task_id})
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/chart.html',
dag=dag,
data=json.dumps(all_data),
chart_options={'yAxis': {'title': {'text': 'hours'}}},
height="700px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
if ti.end_date:
ts = ti.execution_date
if dag.schedule_interval:
ts = dag.following_schedule(ts)
secs = old_div((ti.end_date - ts).total_seconds(), 60*60)
data.append([ti.execution_date.isoformat(), secs])
all_data.append({'data': data, 'name': task.task_id})
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/chart.html',
dag=dag,
data=json.dumps(all_data),
height="700px",
chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused')
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect('/')
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti
for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
data = []
for i, ti in enumerate(tis):
end_date = ti.end_date or datetime.now()
tasks += [ti.task_id]
color = State.color(ti.state)
data.append({
'x': i,
'low': int(ti.start_date.strftime('%s')) * 1000,
'high': int(end_date.strftime('%s')) * 1000,
'color': color,
})
height = (len(tis) * 25) + 50
session.commit()
session.close()
hc = {
'chart': {
'type': 'columnrange',
'inverted': True,
'height': height,
},
'xAxis': {'categories': tasks, 'alternateGridColor': '#FAFAFA'},
'yAxis': {'type': 'datetime'},
'title': {
'text': None
},
'plotOptions': {
'series': {
'cursor': 'pointer',
'minPointLength': 4,
},
},
'legend': {
'enabled': False
},
'series': [{
'data': data
}]
}
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
hc=json.dumps(hc, indent=4),
height=height,
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
def task_instances(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# filter the dags if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
if do_filter:
qry = (
session.query(DM)
.filter(
~DM.is_subdag, DM.is_active,
DM.owners.like('%' + current_user.username + '%'))
.all()
)
else:
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
orm_dags = {dag.dag_id: dag for dag in qry}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
dags = dagbag.dags.values()
if do_filter:
dags = {
dag.dag_id: dag
for dag in dags
if (
dag.owner == current_user.username and (not dag.parent_dag)
)
}
else:
dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
return self.render(
'airflow/dags.html',
dags=dags,
orm_dags=orm_dags,
all_dag_ids=all_dag_ids)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/')
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.args.get('conn_id')
csv = request.args.get('csv') == "true"
sql = request.args.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
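    # Illustrative note (example keys are hypothetical, not part of Airflow):
    # a default_params value matching the description above could be
    #     {"ds_param": "2015-01-01", "limit": "100"}
    # i.e. a literal that evaluates to a Python dict of template parameters.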
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('para', 'Parallel Coordinates'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('heatmap', 'Heatmap'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = datetime.now()
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
models.KnownEventType,
Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
pass
mv = DagPickleView(
models.DagPickle,
Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
def hidden_field_formatter(view, context, model, name):
if should_hide_value_for_key(model.key):
return Markup('*' * 8)
return getattr(model, name)
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter
}
def on_form_prefill(self, form, id):
if should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_delete = True
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link)
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_dagrun_state(ids, State.SUCCESS)
@provide_session
def set_dagrun_state(self, ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
count += 1
dr.state = target_state
if target_state == State.RUNNING:
dr.start_date = datetime.now()
else:
dr.end_date = datetime.now()
session.commit()
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('start_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
can_delete = True
page_size = 500
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
for count, id in enumerate(ids):
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
count += 1
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
    # Used to customize the form: these form elements get rendered and their
    # results are stored in the extra field as JSON. All of them need to be
    # prefixed with extra__ followed by the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file.
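    # Illustrative sketch (hypothetical field, not part of this view): a
    # MySQL-specific option would follow the same naming pattern, e.g.
    #     'extra__mysql__charset': StringField('Charset'),
    # though note that on_model_change below currently serializes extras only
    # for the jdbc and google_cloud_platform connection types.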
form_extra_fields = {
'extra__jdbc__drv_path' : StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': [
('fs', 'File (path)'),
('ftp', 'FTP',),
('google_cloud_platform', 'Google Cloud Platform'),
('hdfs', 'HDFS',),
('http', 'HTTP',),
('hive_cli', 'Hive Client Wrapper',),
('hive_metastore', 'Hive Metastore Thrift',),
('hiveserver2', 'Hive Server 2 Thrift',),
('jdbc', 'Jdbc Connection',),
('mysql', 'MySQL',),
('postgres', 'Postgres',),
('oracle', 'Oracle',),
('vertica', 'Vertica',),
('presto', 'Presto',),
('s3', 'S3',),
('samba', 'Samba',),
('sqlite', 'Sqlite',),
('ssh', 'SSH',),
('cloudant', 'IBM Cloudant',),
('mssql', 'Microsoft SQL Server'),
('mesos_framework-id', 'Mesos Framework ID'),
]
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key:formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
return conf.get('core', 'fernet_key') is None
@classmethod
def is_secure(self):
"""
Used to display a message in the Connection list view making it clear
that the passwords and `extra` field can't be encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception as e:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, LoggingMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("airflow")[0].version
except Exception as e:
airflow_version = None
self.logger.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
self.logger.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| apache-2.0 |
ej81/mercator | mercator/coastline.py | 1 | 3283 | """Coastline polygons read from an ESRI shapefile, for plotting with matplotlib."""
__author__ = "Eric Jansen"
__email__ = "[email protected]"
import os
import shapefile
import warnings
import numpy as np
from matplotlib.path import Path
from matplotlib.patches import Polygon
def _find(name, path):
if os.path.isfile(os.path.join(path, name)):
return os.path.join(path, name)
else:
name = os.path.basename(name)
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
return None
def _split(points, parts):
num = len(parts)
if num > 1:
result = []
for index in range(0, num):
start = parts[index]
if index < num-1:
end = parts[index+1]
else:
end = -1
result += [points[start:end]]
return result
else:
return [points]
class Coastline(Polygon):
"""
Coastline background polygon. Clips itself to the axes when drawn.
"""
def __init__(self, filename, **kwargs):
"""
Create a new coastline polygon.
Parameters
----------
        filename : str
            Name of the ESRI shapefile to read the coastline polygons from,
            resolved against the package data directory.
        color : color, optional, default 'gray'
            Line color of the coastline.
land : color, optional, default 'seashell'
Fill color of the land polygons.
Other parameters
----------------
kwargs : polygon properties
Other parameters passed on to :class:`~matplotlib.patches.Polygon`,
e.g. zorder=N to control drawing the land polygons above/below
other data.
"""
color = kwargs.pop('color', 'gray')
land = kwargs.pop('land', 'seashell')
self.data = []
self.extents = None
if not color:
color = 'none'
if not land:
land = 'none'
xy = [[None, None], [None, None]]
Polygon.__init__(self, xy, edgecolor=color, facecolor=land, **kwargs)
datapath = os.path.join(os.path.dirname(__file__), 'data')
coastfile = _find(filename, datapath)
if coastfile:
file = shapefile.Reader(coastfile)
for shape in file.shapes():
for points in _split(shape.points, shape.parts):
self.data += [Path(points)]
else:
raise Warning('coastline "%s" not found in directory "%s"' % (filename, datapath))
def draw(self, renderer):
"""
Clip the polygons to the view limits and let the parent handle drawing.
"""
bbox = self.axes.viewLim
if not np.array_equal(bbox.get_points(), self.extents):
self._visible = False
xy = [[None, None], [None, None]]
for path in self.data:
try:
for point in path.clip_to_bbox(bbox).vertices:
xy += [point]
self._visible = True
except ValueError:
pass
if not np.array_equal(xy[-1], [None, None]):
xy += [[None, None]]
self.set_xy(xy)
self.extents = np.copy(bbox.get_points())
return super(Coastline, self).draw(renderer)
| bsd-2-clause |
yl565/statsmodels | statsmodels/regression/tests/test_regression.py | 2 | 38342 | """
Test functions for models.regression
"""
# TODO: Test for LM
from statsmodels.compat.python import long, lrange
import warnings
import pandas
import numpy as np
from numpy.testing import (assert_almost_equal, assert_approx_equal, assert_,
assert_raises, assert_equal, assert_allclose)
from scipy.linalg import toeplitz
from statsmodels.tools.tools import add_constant, categorical
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.regression.linear_model import OLS, WLS, GLS, yule_walker
from statsmodels.datasets import longley
from scipy.stats import t as student_t
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_7 = 7
DECIMAL_0 = 0
class CheckRegressionResults(object):
"""
    res2 contains results from Rmodelwrap or results obtained from statistical
    packages such as R, Stata, or SAS and written to model_results
"""
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params,
self.decimal_params)
decimal_standarderrors = DECIMAL_4
def test_standarderrors(self):
assert_almost_equal(self.res1.bse,self.res2.bse,
self.decimal_standarderrors)
decimal_confidenceintervals = DECIMAL_4
def test_confidenceintervals(self):
#NOTE: stata rounds residuals (at least) to sig digits so approx_equal
conf1 = self.res1.conf_int()
conf2 = self.res2.conf_int()
for i in range(len(conf1)):
assert_approx_equal(conf1[i][0], conf2[i][0],
self.decimal_confidenceintervals)
assert_approx_equal(conf1[i][1], conf2[i][1],
self.decimal_confidenceintervals)
decimal_conf_int_subset = DECIMAL_4
def test_conf_int_subset(self):
if len(self.res1.params) > 1:
ci1 = self.res1.conf_int(cols=(1,2))
ci2 = self.res1.conf_int()[1:3]
assert_almost_equal(ci1, ci2, self.decimal_conf_int_subset)
else:
pass
decimal_scale = DECIMAL_4
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale,
self.decimal_scale)
decimal_rsquared = DECIMAL_4
def test_rsquared(self):
assert_almost_equal(self.res1.rsquared, self.res2.rsquared,
self.decimal_rsquared)
decimal_rsquared_adj = DECIMAL_4
def test_rsquared_adj(self):
assert_almost_equal(self.res1.rsquared_adj, self.res2.rsquared_adj,
self.decimal_rsquared_adj)
def test_degrees(self):
assert_equal(self.res1.model.df_model, self.res2.df_model)
assert_equal(self.res1.model.df_resid, self.res2.df_resid)
decimal_ess = DECIMAL_4
def test_ess(self):
#Explained Sum of Squares
assert_almost_equal(self.res1.ess, self.res2.ess,
self.decimal_ess)
decimal_ssr = DECIMAL_4
def test_sumof_squaredresids(self):
assert_almost_equal(self.res1.ssr, self.res2.ssr, self.decimal_ssr)
decimal_mse_resid = DECIMAL_4
def test_mse_resid(self):
#Mean squared error of residuals
assert_almost_equal(self.res1.mse_model, self.res2.mse_model,
self.decimal_mse_resid)
decimal_mse_model = DECIMAL_4
def test_mse_model(self):
assert_almost_equal(self.res1.mse_resid, self.res2.mse_resid,
self.decimal_mse_model)
decimal_mse_total = DECIMAL_4
def test_mse_total(self):
assert_almost_equal(self.res1.mse_total, self.res2.mse_total,
self.decimal_mse_total, err_msg="Test class %s" % self)
decimal_fvalue = DECIMAL_4
def test_fvalue(self):
#didn't change this, not sure it should complain -inf not equal -inf
#if not (np.isinf(self.res1.fvalue) and np.isinf(self.res2.fvalue)):
assert_almost_equal(self.res1.fvalue, self.res2.fvalue,
self.decimal_fvalue)
decimal_loglike = DECIMAL_4
def test_loglike(self):
assert_almost_equal(self.res1.llf, self.res2.llf, self.decimal_loglike)
decimal_aic = DECIMAL_4
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, self.decimal_aic)
decimal_bic = DECIMAL_4
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, self.decimal_bic)
decimal_pvalues = DECIMAL_4
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues,
self.decimal_pvalues)
decimal_wresid = DECIMAL_4
def test_wresid(self):
assert_almost_equal(self.res1.wresid, self.res2.wresid,
self.decimal_wresid)
decimal_resids = DECIMAL_4
def test_resids(self):
assert_almost_equal(self.res1.resid, self.res2.resid,
self.decimal_resids)
decimal_norm_resids = DECIMAL_4
def test_norm_resids(self):
assert_almost_equal(self.res1.resid_pearson, self.res2.resid_pearson,
self.decimal_norm_resids)
#TODO: test fittedvalues and what else?
class TestOLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
from .results.results_regression import Longley
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
res2 = Longley()
res2.wresid = res1.wresid # workaround hack
cls.res1 = res1
cls.res2 = res2
res_qr = OLS(data.endog, data.exog).fit(method="qr")
model_qr = OLS(data.endog, data.exog)
Q, R = np.linalg.qr(data.exog)
model_qr.exog_Q, model_qr.exog_R = Q, R
model_qr.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
model_qr.rank = np_matrix_rank(R)
res_qr2 = model_qr.fit(method="qr")
cls.res_qr = res_qr
cls.res_qr_manual = res_qr2
def test_eigenvalues(self):
eigenval_perc_diff = (self.res_qr.eigenvals - self.res_qr_manual.eigenvals)
eigenval_perc_diff /= self.res_qr.eigenvals
zeros = np.zeros_like(eigenval_perc_diff)
assert_almost_equal(eigenval_perc_diff, zeros, DECIMAL_7)
# Robust error tests. Compare values computed with SAS
def test_HC0_errors(self):
#They are split up because the copied results do not have any DECIMAL_4
#places for the last place.
assert_almost_equal(self.res1.HC0_se[:-1],
self.res2.HC0_se[:-1], DECIMAL_4)
assert_approx_equal(np.round(self.res1.HC0_se[-1]), self.res2.HC0_se[-1])
def test_HC1_errors(self):
assert_almost_equal(self.res1.HC1_se[:-1],
self.res2.HC1_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC1_se[-1], self.res2.HC1_se[-1])
def test_HC2_errors(self):
assert_almost_equal(self.res1.HC2_se[:-1],
self.res2.HC2_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC2_se[-1], self.res2.HC2_se[-1])
def test_HC3_errors(self):
assert_almost_equal(self.res1.HC3_se[:-1],
self.res2.HC3_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC3_se[-1], self.res2.HC3_se[-1])
def test_qr_params(self):
assert_almost_equal(self.res1.params,
self.res_qr.params, 6)
def test_qr_normalized_cov_params(self):
#todo: need assert_close
assert_almost_equal(np.ones_like(self.res1.normalized_cov_params),
self.res1.normalized_cov_params /
self.res_qr.normalized_cov_params, 5)
def test_missing(self):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
data.endog[[3, 7, 14]] = np.nan
mod = OLS(data.endog, data.exog, missing='drop')
assert_equal(mod.endog.shape[0], 13)
assert_equal(mod.exog.shape[0], 13)
def test_rsquared_adj_overfit(self):
# Test that if df_resid = 0, rsquared_adj = 0.
# This is a regression test for user issue:
# https://github.com/statsmodels/statsmodels/issues/868
with warnings.catch_warnings(record=True):
x = np.random.randn(5)
y = np.random.randn(5, 6)
results = OLS(x, y).fit()
rsquared_adj = results.rsquared_adj
assert_equal(rsquared_adj, np.nan)
def test_qr_alternatives(self):
assert_allclose(self.res_qr.params, self.res_qr_manual.params,
rtol=5e-12)
def test_norm_resid(self):
resid = self.res1.wresid
norm_resid = resid / np.sqrt(np.sum(resid**2.0) / self.res1.df_resid)
model_norm_resid = self.res1.resid_pearson
assert_almost_equal(model_norm_resid, norm_resid, DECIMAL_7)
def test_norm_resid_zero_variance(self):
with warnings.catch_warnings(record=True):
y = self.res1.model.endog
res = OLS(y,y).fit()
assert_allclose(res.scale, 0, atol=1e-20)
assert_allclose(res.wresid, res.resid_pearson, atol=5e-11)
class TestRTO(CheckRegressionResults):
@classmethod
def setupClass(cls):
from .results.results_regression import LongleyRTO
data = longley.load()
res1 = OLS(data.endog, data.exog).fit()
res2 = LongleyRTO()
res2.wresid = res1.wresid # workaround hack
cls.res1 = res1
cls.res2 = res2
res_qr = OLS(data.endog, data.exog).fit(method="qr")
cls.res_qr = res_qr
class TestFtest(object):
"""
Tests f_test vs. RegressionResults
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
R = np.identity(7)[:-1,:]
cls.Ftest = cls.res1.f_test(R)
def test_F(self):
assert_almost_equal(self.Ftest.fvalue, self.res1.fvalue, DECIMAL_4)
def test_p(self):
assert_almost_equal(self.Ftest.pvalue, self.res1.f_pvalue, DECIMAL_4)
def test_Df_denom(self):
assert_equal(self.Ftest.df_denom, self.res1.model.df_resid)
def test_Df_num(self):
assert_equal(self.Ftest.df_num, 6)
class TestFTest2(object):
"""
A joint test that the coefficient on
GNP = the coefficient on UNEMP and that the coefficient on
POP = the coefficient on YEAR for the Longley dataset.
Ftest1 is from statsmodels. Results are from Rpy using R's car library.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
R2 = [[0,1,-1,0,0,0,0],[0, 0, 0, 0, 1, -1, 0]]
cls.Ftest1 = res1.f_test(R2)
hyp = 'x2 = x3, x5 = x6'
cls.NewFtest1 = res1.f_test(hyp)
def test_new_ftest(self):
assert_equal(self.NewFtest1.fvalue, self.Ftest1.fvalue)
def test_fvalue(self):
assert_almost_equal(self.Ftest1.fvalue, 9.7404618732968196, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ftest1.pvalue, 0.0056052885317493459,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ftest1.df_denom, 9)
def test_df_num(self):
assert_equal(self.Ftest1.df_num, 2)
class TestFtestQ(object):
"""
A joint hypothesis test that Rb = q. Coefficient tests are essentially
made up. Test values taken from Stata.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
R = np.array([[0,1,1,0,0,0,0],
[0,1,0,1,0,0,0],
[0,1,0,0,0,0,0],
[0,0,0,0,1,0,0],
[0,0,0,0,0,1,0]])
q = np.array([0,0,0,1,0])
cls.Ftest1 = res1.f_test((R,q))
def test_fvalue(self):
assert_almost_equal(self.Ftest1.fvalue, 70.115557, 5)
def test_pvalue(self):
assert_almost_equal(self.Ftest1.pvalue, 6.229e-07, 10)
def test_df_denom(self):
assert_equal(self.Ftest1.df_denom, 9)
def test_df_num(self):
assert_equal(self.Ftest1.df_num, 5)
class TestTtest(object):
"""
Test individual t-tests. Ie., are the coefficients significantly
different than zero.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
R = np.identity(7)
cls.Ttest = cls.res1.t_test(R)
hyp = 'x1 = 0, x2 = 0, x3 = 0, x4 = 0, x5 = 0, x6 = 0, const = 0'
cls.NewTTest = cls.res1.t_test(hyp)
def test_new_tvalue(self):
assert_equal(self.NewTTest.tvalue, self.Ttest.tvalue)
def test_tvalue(self):
assert_almost_equal(self.Ttest.tvalue, self.res1.tvalues, DECIMAL_4)
def test_sd(self):
assert_almost_equal(self.Ttest.sd, self.res1.bse, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ttest.pvalue, student_t.sf(
np.abs(self.res1.tvalues), self.res1.model.df_resid)*2,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ttest.df_denom, self.res1.model.df_resid)
def test_effect(self):
assert_almost_equal(self.Ttest.effect, self.res1.params)
class TestTtest2(object):
"""
Tests the hypothesis that the coefficients on POP and YEAR
are equal.
Results from RPy using 'car' package.
"""
@classmethod
def setupClass(cls):
R = np.zeros(7)
R[4:6] = [1,-1]
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
cls.Ttest1 = res1.t_test(R)
def test_tvalue(self):
assert_almost_equal(self.Ttest1.tvalue, -4.0167754636397284,
DECIMAL_4)
def test_sd(self):
assert_almost_equal(self.Ttest1.sd, 455.39079425195314, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ttest1.pvalue, 2*0.0015163772380932246,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ttest1.df_denom, 9)
def test_effect(self):
assert_almost_equal(self.Ttest1.effect, -1829.2025687186533, DECIMAL_4)
class TestGLS(object):
"""
These test results were obtained by replication with R.
"""
@classmethod
def setupClass(cls):
from .results.results_regression import LongleyGls
data = longley.load()
exog = add_constant(np.column_stack((data.exog[:,1],
data.exog[:,4])), prepend=False)
tmp_results = OLS(data.endog, exog).fit()
rho = np.corrcoef(tmp_results.resid[1:],
tmp_results.resid[:-1])[0][1] # by assumption
order = toeplitz(np.arange(16))
sigma = rho**order
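        # sigma is the AR(1) error covariance implied by rho:
        # sigma[i, j] = rho ** |i - j| for the 16 Longley observations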
GLS_results = GLS(data.endog, exog, sigma=sigma).fit()
cls.res1 = GLS_results
cls.res2 = LongleyGls()
# attach for test_missing
cls.sigma = sigma
cls.exog = exog
cls.endog = data.endog
def test_aic(self):
assert_approx_equal(self.res1.aic+2, self.res2.aic, 3)
def test_bic(self):
assert_approx_equal(self.res1.bic, self.res2.bic, 2)
def test_loglike(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_0)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_1)
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid, DECIMAL_4)
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale, DECIMAL_4)
def test_tvalues(self):
assert_almost_equal(self.res1.tvalues, self.res2.tvalues, DECIMAL_4)
def test_standarderrors(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
DECIMAL_4)
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
def test_missing(self):
endog = self.endog.copy() # copy or changes endog for other methods
endog[[4,7,14]] = np.nan
mod = GLS(endog, self.exog, sigma=self.sigma, missing='drop')
assert_equal(mod.endog.shape[0], 13)
assert_equal(mod.exog.shape[0], 13)
assert_equal(mod.sigma.shape, (13,13))
class TestGLS_alt_sigma(CheckRegressionResults):
"""
Test that GLS with no argument is equivalent to OLS.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
ols_res = OLS(data.endog, data.exog).fit()
gls_res = GLS(data.endog, data.exog).fit()
gls_res_scalar = GLS(data.endog, data.exog, sigma=1)
cls.endog = data.endog
cls.exog = data.exog
cls.res1 = gls_res
cls.res2 = ols_res
cls.res3 = gls_res_scalar
# self.res2.conf_int = self.res2.conf_int()
def test_wrong_size_sigma_1d(self):
n = len(self.endog)
assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones(n-1))
def test_wrong_size_sigma_2d(self):
n = len(self.endog)
assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones((n-1,n-1)))
# def check_confidenceintervals(self, conf1, conf2):
# assert_almost_equal(conf1, conf2, DECIMAL_4)
class TestLM(object):
@classmethod
def setupClass(cls):
# TODO: Test HAC method
X = np.random.randn(100,3)
b = np.ones((3,1))
e = np.random.randn(100,1)
y = np.dot(X,b) + e
# Cases?
# Homoskedastic
# HC0
cls.res1_full = OLS(y,X).fit()
cls.res1_restricted = OLS(y,X[:,0]).fit()
cls.res2_full = cls.res1_full.get_robustcov_results('HC0')
cls.res2_restricted = cls.res1_restricted.get_robustcov_results('HC0')
cls.X = X
cls.Y = y
def test_LM_homoskedastic(self):
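        # The homoskedastic LM (score) statistic computed below is
        # n * s' S^{-1} s, where s is the mean score of the restricted model
        # and S = (e'e / n) * (X'X / n) its covariance estimate; it should
        # match compare_lm_test on the full OLS results.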
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
S = np.dot(resid,resid) / n * np.dot(X.T,X) / n
Sinv = np.linalg.inv(S)
s = np.mean(X * resid[:,None], 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res1_full.compare_lm_test(self.res1_restricted)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_nodemean(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
S = np.dot(scores.T,scores) / n
Sinv = np.linalg.inv(S)
s = np.mean(scores, 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, demean=False)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_demean(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
scores_demean = scores - scores.mean(0)
S = np.dot(scores_demean.T,scores_demean) / n
Sinv = np.linalg.inv(S)
s = np.mean(scores, 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_LRversion(self):
resid = self.res1_restricted.wresid
resid_full = self.res1_full.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
s = np.mean(scores, 0)
scores = X * resid_full[:,None]
S = np.dot(scores.T,scores) / n
Sinv = np.linalg.inv(S)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, use_lr = True)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_nonnested(self):
assert_raises(ValueError, self.res2_restricted.compare_lm_test, self.res2_full)
class TestOLS_GLS_WLS_equivalence(object):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
y = data.endog
X = data.exog
n = y.shape[0]
w = np.ones(n)
cls.results = []
cls.results.append(OLS(y, X).fit())
cls.results.append(WLS(y, X, w).fit())
cls.results.append(GLS(y, X, 100*w).fit())
cls.results.append(GLS(y, X, np.diag(0.1*w)).fit())
def test_ll(self):
llf = np.array([r.llf for r in self.results])
llf_1 = np.ones_like(llf) * self.results[0].llf
assert_almost_equal(llf, llf_1, DECIMAL_7)
ic = np.array([r.aic for r in self.results])
ic_1 = np.ones_like(ic) * self.results[0].aic
assert_almost_equal(ic, ic_1, DECIMAL_7)
ic = np.array([r.bic for r in self.results])
ic_1 = np.ones_like(ic) * self.results[0].bic
assert_almost_equal(ic, ic_1, DECIMAL_7)
def test_params(self):
params = np.array([r.params for r in self.results])
params_1 = np.array([self.results[0].params] * len(self.results))
assert_allclose(params, params_1)
def test_ss(self):
bse = np.array([r.bse for r in self.results])
bse_1 = np.array([self.results[0].bse] * len(self.results))
assert_allclose(bse, bse_1)
def test_rsquared(self):
rsquared = np.array([r.rsquared for r in self.results])
rsquared_1 = np.array([self.results[0].rsquared] * len(self.results))
assert_almost_equal(rsquared, rsquared_1, DECIMAL_7)
class TestGLS_WLS_equivalence(TestOLS_GLS_WLS_equivalence):
# reuse test methods
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
y = data.endog
X = data.exog
n = y.shape[0]
np.random.seed(5)
w = np.random.uniform(0.5, 1, n)
w_inv = 1. / w
cls.results = []
cls.results.append(WLS(y, X, w).fit())
cls.results.append(WLS(y, X, 0.01 * w).fit())
cls.results.append(GLS(y, X, 100 * w_inv).fit())
cls.results.append(GLS(y, X, np.diag(0.1 * w_inv)).fit())
def test_rsquared(self):
# TODO: WLS rsquared is ok, GLS might have wrong centered_tss
# We only check that WLS and GLS rsquared is invariant to scaling
# WLS and GLS have different rsquared
assert_almost_equal(self.results[1].rsquared, self.results[0].rsquared,
DECIMAL_7)
assert_almost_equal(self.results[3].rsquared, self.results[2].rsquared,
DECIMAL_7)
class TestNonFit(object):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.endog = data.endog
cls.exog = data.exog
cls.ols_model = OLS(data.endog, data.exog)
def test_df_resid(self):
        df_resid = self.endog.shape[0] - self.exog.shape[1]
        assert_equal(self.ols_model.df_resid, df_resid)
class TestWLS_CornerCases(object):
@classmethod
def setupClass(cls):
cls.exog = np.ones((1,))
cls.endog = np.ones((1,))
weights = 1
cls.wls_res = WLS(cls.endog, cls.exog, weights=weights).fit()
def test_wrong_size_weights(self):
weights = np.ones((10,10))
assert_raises(ValueError, WLS, self.endog, self.exog, weights=weights)
class TestWLSExogWeights(CheckRegressionResults):
#Test WLS with Greene's credit card data
#reg avgexp age income incomesq ownrent [aw=1/incomesq]
def __init__(self):
from .results.results_regression import CCardWLS
from statsmodels.datasets.ccard import load
dta = load()
dta.exog = add_constant(dta.exog, prepend=False)
nobs = 72.
weights = 1/dta.exog[:,2]
# for comparison with stata analytic weights
scaled_weights = ((weights * nobs)/weights.sum())
self.res1 = WLS(dta.endog, dta.exog, weights=scaled_weights).fit()
self.res2 = CCardWLS()
self.res2.wresid = scaled_weights ** .5 * self.res2.resid
# correction because we use different definition for loglike/llf
corr_ic = 2 * (self.res1.llf - self.res2.llf)
self.res2.aic -= corr_ic
self.res2.bic -= corr_ic
self.res2.llf += 0.5 * np.sum(np.log(self.res1.model.weights))
def test_wls_example():
    # example from the WLS docstring; there was a note about a bug that
    # should be fixed by now
Y = [1,3,4,5,2,3,4]
X = lrange(1,8)
X = add_constant(X, prepend=False)
wls_model = WLS(Y,X, weights=lrange(1,8)).fit()
#taken from R lm.summary
assert_almost_equal(wls_model.fvalue, 0.127337843215, 6)
assert_almost_equal(wls_model.scale, 2.44608530786**2, 6)
def test_wls_tss():
y = np.array([22, 22, 22, 23, 23, 23])
X = [[1, 0], [1, 0], [1, 1], [0, 1], [0, 1], [0, 1]]
ols_mod = OLS(y, add_constant(X, prepend=False)).fit()
yw = np.array([22, 22, 23.])
Xw = [[1,0],[1,1],[0,1]]
w = np.array([2, 1, 3.])
wls_mod = WLS(yw, add_constant(Xw, prepend=False), weights=w).fit()
assert_equal(ols_mod.centered_tss, wls_mod.centered_tss)
class TestWLSScalarVsArray(CheckRegressionResults):
@classmethod
def setupClass(cls):
from statsmodels.datasets.longley import load
dta = load()
dta.exog = add_constant(dta.exog, prepend=True)
wls_scalar = WLS(dta.endog, dta.exog, weights=1./3).fit()
weights = [1/3.] * len(dta.endog)
wls_array = WLS(dta.endog, dta.exog, weights=weights).fit()
cls.res1 = wls_scalar
cls.res2 = wls_array
#class TestWLS_GLS(CheckRegressionResults):
# @classmethod
# def setupClass(cls):
# from statsmodels.datasets.ccard import load
# data = load()
# cls.res1 = WLS(data.endog, data.exog, weights = 1/data.exog[:,2]).fit()
# cls.res2 = GLS(data.endog, data.exog, sigma = data.exog[:,2]).fit()
#
# def check_confidenceintervals(self, conf1, conf2):
# assert_almost_equal(conf1, conf2(), DECIMAL_4)
def test_wls_missing():
from statsmodels.datasets.ccard import load
data = load()
endog = data.endog
endog[[10, 25]] = np.nan
mod = WLS(data.endog, data.exog, weights = 1/data.exog[:,2], missing='drop')
assert_equal(mod.endog.shape[0], 70)
assert_equal(mod.exog.shape[0], 70)
assert_equal(mod.weights.shape[0], 70)
class TestWLS_OLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
cls.res2 = WLS(data.endog, data.exog).fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_OLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = GLS(data.endog, data.exog).fit()
cls.res2 = OLS(data.endog, data.exog).fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
#TODO: test AR
# why the two-stage in AR?
#class test_ar(object):
# from statsmodels.datasets.sunspots import load
# data = load()
# model = AR(data.endog, rho=4).fit()
# R_res = RModel(data.endog, aic="FALSE", order_max=4)
# def test_params(self):
# assert_almost_equal(self.model.rho,
# pass
# def test_order(self):
# In R this can be defined or chosen by minimizing the AIC if aic=True
# pass
class TestYuleWalker(object):
@classmethod
def setupClass(cls):
from statsmodels.datasets.sunspots import load
data = load()
cls.rho, cls.sigma = yule_walker(data.endog, order=4,
method="mle")
cls.R_params = [1.2831003105694765, -0.45240924374091945,
-0.20770298557575195, 0.047943648089542337]
def test_params(self):
assert_almost_equal(self.rho, self.R_params, DECIMAL_4)
class TestDataDimensions(CheckRegressionResults):
@classmethod
def setupClass(cls):
np.random.seed(54321)
cls.endog_n_ = np.random.uniform(0,20,size=30)
cls.endog_n_one = cls.endog_n_[:,None]
cls.exog_n_ = np.random.uniform(0,20,size=30)
cls.exog_n_one = cls.exog_n_[:,None]
cls.degen_exog = cls.exog_n_one[:-1]
cls.mod1 = OLS(cls.endog_n_one, cls.exog_n_one)
cls.mod1.df_model += 1
cls.res1 = cls.mod1.fit()
# Note that these are created for every subclass..
# A little extra overhead probably
cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_one)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_large_data(TestDataDimensions):
@classmethod
def setupClass(cls):
nobs = 1000
y = np.random.randn(nobs,1)
X = np.random.randn(nobs,20)
sigma = np.ones_like(y)
cls.gls_res = GLS(y, X, sigma=sigma).fit()
cls.gls_res_scalar = GLS(y, X, sigma=1).fit()
cls.gls_res_none= GLS(y, X).fit()
cls.ols_res = OLS(y, X).fit()
def test_large_equal_params(self):
assert_almost_equal(self.ols_res.params, self.gls_res.params, DECIMAL_7)
def test_large_equal_loglike(self):
assert_almost_equal(self.ols_res.llf, self.gls_res.llf, DECIMAL_7)
def test_large_equal_params_none(self):
assert_almost_equal(self.gls_res.params, self.gls_res_none.params,
DECIMAL_7)
class TestNxNx(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxNx, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_, cls.exog_n_)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
class TestNxOneNx(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxOneNx, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
class TestNxNxOne(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxNxOne, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_, cls.exog_n_one)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
def test_bad_size():
np.random.seed(54321)
data = np.random.uniform(0,20,31)
assert_raises(ValueError, OLS, data, data[1:])
def test_const_indicator():
np.random.seed(12345)
X = np.random.randint(0, 3, size=30)
X = categorical(X, drop=True)
y = np.dot(X, [1., 2., 3.]) + np.random.normal(size=30)
modc = OLS(y, add_constant(X[:,1:], prepend=True)).fit()
mod = OLS(y, X, hasconst=True).fit()
assert_almost_equal(modc.rsquared, mod.rsquared, 12)
def test_706():
# make sure one regressor pandas Series gets passed to DataFrame
# for conf_int.
y = pandas.Series(np.random.randn(10))
x = pandas.Series(np.ones(10))
res = OLS(y,x).fit()
conf_int = res.conf_int()
np.testing.assert_equal(conf_int.shape, (1, 2))
np.testing.assert_(isinstance(conf_int, pandas.DataFrame))
def test_summary():
# test 734
import re
dta = longley.load_pandas()
X = dta.exog
X["constant"] = 1
y = dta.endog
with warnings.catch_warnings(record=True):
res = OLS(y, X).fit()
table = res.summary().as_latex()
# replace the date and time
table = re.sub("(?<=\n\\\\textbf\{Date:\} &).+?&",
" Sun, 07 Apr 2013 &", table)
table = re.sub("(?<=\n\\\\textbf\{Time:\} &).+?&",
" 13:46:07 &", table)
expected = """\\begin{center}
\\begin{tabular}{lclc}
\\toprule
\\textbf{Dep. Variable:} & TOTEMP & \\textbf{ R-squared: } & 0.995 \\\\
\\textbf{Model:} & OLS & \\textbf{ Adj. R-squared: } & 0.992 \\\\
\\textbf{Method:} & Least Squares & \\textbf{ F-statistic: } & 330.3 \\\\
\\textbf{Date:} & Sun, 07 Apr 2013 & \\textbf{ Prob (F-statistic):} & 4.98e-10 \\\\
\\textbf{Time:} & 13:46:07 & \\textbf{ Log-Likelihood: } & -109.62 \\\\
\\textbf{No. Observations:} & 16 & \\textbf{ AIC: } & 233.2 \\\\
\\textbf{Df Residuals:} & 9 & \\textbf{ BIC: } & 238.6 \\\\
\\textbf{Df Model:} & 6 & \\textbf{ } & \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lcccccc}
& \\textbf{coef} & \\textbf{std err} & \\textbf{t} & \\textbf{P$>$$|$t$|$} & \\textbf{[0.025} & \\textbf{0.975]} \\\\
\\midrule
\\textbf{GNPDEFL} & 15.0619 & 84.915 & 0.177 & 0.863 & -177.029 & 207.153 \\\\
\\textbf{GNP} & -0.0358 & 0.033 & -1.070 & 0.313 & -0.112 & 0.040 \\\\
\\textbf{UNEMP} & -2.0202 & 0.488 & -4.136 & 0.003 & -3.125 & -0.915 \\\\
\\textbf{ARMED} & -1.0332 & 0.214 & -4.822 & 0.001 & -1.518 & -0.549 \\\\
\\textbf{POP} & -0.0511 & 0.226 & -0.226 & 0.826 & -0.563 & 0.460 \\\\
\\textbf{YEAR} & 1829.1515 & 455.478 & 4.016 & 0.003 & 798.788 & 2859.515 \\\\
\\textbf{constant} & -3.482e+06 & 8.9e+05 & -3.911 & 0.004 & -5.5e+06 & -1.47e+06 \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lclc}
\\textbf{Omnibus:} & 0.749 & \\textbf{ Durbin-Watson: } & 2.559 \\\\
\\textbf{Prob(Omnibus):} & 0.688 & \\textbf{ Jarque-Bera (JB): } & 0.684 \\\\
\\textbf{Skew:} & 0.420 & \\textbf{ Prob(JB): } & 0.710 \\\\
\\textbf{Kurtosis:} & 2.434 & \\textbf{ Cond. No. } & 4.86e+09 \\\\
\\bottomrule
\\end{tabular}
%\\caption{OLS Regression Results}
\\end{center}"""
assert_equal(table, expected)
class TestRegularizedFit(object):
# Make sure there are no issues when there are no selected
# variables.
def test_empty_model(self):
np.random.seed(742)
n = 100
endog = np.random.normal(size=n)
exog = np.random.normal(size=(n, 3))
model = OLS(endog, exog)
result = model.fit_regularized(alpha=1000)
assert_equal(result.params, 0.)
def test_regularized(self):
import os
from . import glmnet_r_results
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.loadtxt(os.path.join(cur_dir, "results", "lasso_data.csv"),
delimiter=",")
tests = [x for x in dir(glmnet_r_results) if x.startswith("rslt_")]
for test in tests:
vec = getattr(glmnet_r_results, test)
n = vec[0]
p = vec[1]
L1_wt = float(vec[2])
lam = float(vec[3])
params = vec[4:].astype(np.float64)
endog = data[0:int(n), 0]
exog = data[0:int(n), 1:(int(p)+1)]
endog = endog - endog.mean()
endog /= endog.std(ddof=1)
exog = exog - exog.mean(0)
exog /= exog.std(0, ddof=1)
mod = OLS(endog, exog)
rslt = mod.fit_regularized(L1_wt=L1_wt, alpha=lam)
assert_almost_equal(rslt.params, params, decimal=3)
# Smoke test for summary
smry = rslt.summary()
            # Smoke test for profile likelihood
result = mod.fit_regularized(L1_wt=L1_wt, alpha=lam, profile_scale=True)
def test_formula_missing_cat():
# gh-805
import statsmodels.api as sm
from statsmodels.formula.api import ols
from patsy import PatsyError
dta = sm.datasets.grunfeld.load_pandas().data
    dta.loc[0, 'firm'] = np.nan
mod = ols(formula='value ~ invest + capital + firm + year',
data=dta.dropna())
res = mod.fit()
mod2 = ols(formula='value ~ invest + capital + firm + year',
data=dta)
res2 = mod2.fit()
assert_almost_equal(res.params.values, res2.params.values)
assert_raises(PatsyError, ols, 'value ~ invest + capital + firm + year',
data=dta, missing='raise')
def test_missing_formula_predict():
# see 2171
nsample = 30
data = pandas.DataFrame({'x': np.linspace(0, 10, nsample)})
null = pandas.DataFrame({'x': np.array([np.nan])})
data = pandas.concat([data, null])
beta = np.array([1, 0.1])
e = np.random.normal(size=nsample+1)
data['y'] = beta[0] + beta[1] * data['x'] + e
model = OLS.from_formula('y ~ x', data=data)
fit = model.fit()
pred = fit.predict(exog=data[:-1])
def test_fvalue_implicit_constant():
nobs = 100
np.random.seed(2)
x = np.random.randn(nobs, 1)
x = ((x > 0) == [True, False]).astype(int)
y = x.sum(1) + np.random.randn(nobs)
w = 1 + 0.25 * np.random.rand(nobs)
from statsmodels.regression.linear_model import OLS, WLS
res = OLS(y, x).fit(cov_type='HC1')
assert_(np.isnan(res.fvalue))
assert_(np.isnan(res.f_pvalue))
res.summary()
res = WLS(y, x).fit(cov_type='HC1')
assert_(np.isnan(res.fvalue))
assert_(np.isnan(res.f_pvalue))
res.summary()
if __name__=="__main__":
import nose
# run_module_suite()
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
# nose.runmodule(argv=[__file__,'-vvs','-x'], exit=False) #, '--pdb'
| bsd-3-clause |
kagayakidan/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
    # super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
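    # Small illustrative check (added; not in the original example): the
    # "rich getting richer" effect described in the module docstring shows
    # up as very uneven cluster sizes, so report them for each linkage.
    print("cluster sizes for %s linkage: %s"
          % (linkage, np.bincount(clustering.labels_)))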
plt.show()
| bsd-3-clause |
nilmtk/nilmtk | nilmtk/legacy/disaggregate/fhmm_exact.py | 1 | 21202 | import itertools
from copy import deepcopy
from collections import OrderedDict
from warnings import warn
import pickle
import nilmtk
import pandas as pd
import numpy as np
from hmmlearn import hmm
from ...feature_detectors import cluster
from . import Disaggregator
from ...datastore import HDFDataStore
def sort_startprob(mapping, startprob):
""" Sort the startprob according to power means; as returned by mapping
"""
num_elements = len(startprob)
new_startprob = np.zeros(num_elements)
for i in range(len(startprob)):
new_startprob[i] = startprob[mapping[i]]
return new_startprob
def sort_covars(mapping, covars):
new_covars = np.zeros_like(covars)
for i in range(len(covars)):
new_covars[i] = covars[mapping[i]]
return new_covars
def sort_transition_matrix(mapping, A):
"""Sorts the transition matrix according to increasing order of
power means; as returned by mapping
Parameters
----------
mapping :
A : numpy.array of shape (k, k)
transition matrix
"""
num_elements = len(A)
A_new = np.zeros((num_elements, num_elements))
for i in range(num_elements):
for j in range(num_elements):
A_new[i, j] = A[mapping[i], mapping[j]]
return A_new
def sort_learnt_parameters(startprob, means, covars, transmat):
mapping = return_sorting_mapping(means)
means_new = np.sort(means, axis=0)
startprob_new = sort_startprob(mapping, startprob)
covars_new = sort_covars(mapping, covars)
transmat_new = sort_transition_matrix(mapping, transmat)
assert np.shape(means_new) == np.shape(means)
assert np.shape(startprob_new) == np.shape(startprob)
assert np.shape(transmat_new) == np.shape(transmat)
return [startprob_new, means_new, covars_new, transmat_new]
def compute_A_fhmm(list_A):
"""
Parameters
-----------
    list_A : List of transition matrices of the individual learnt HMMs
    Returns
    --------
    result : Combined transition matrix for the FHMM
"""
result = list_A[0]
for i in range(len(list_A) - 1):
result = np.kron(result, list_A[i + 1])
return result
def compute_means_fhmm(list_means):
"""
Returns
-------
[mu, cov]
"""
states_combination = list(itertools.product(*list_means))
num_combinations = len(states_combination)
means_stacked = np.array([sum(x) for x in states_combination])
means = np.reshape(means_stacked, (num_combinations, 1))
cov = np.tile(5 * np.identity(1), (num_combinations, 1, 1))
return [means, cov]
def compute_pi_fhmm(list_pi):
"""
Parameters
-----------
list_pi : List of PI's of individual learnt HMMs
Returns
-------
result : Combined Pi for the FHMM
"""
result = list_pi[0]
for i in range(len(list_pi) - 1):
result = np.kron(result, list_pi[i + 1])
return result
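# Illustrative sketch (added for clarity; the probabilities are made up):
# the Kronecker product above combines independent per-appliance initial
# distributions into a single distribution over joint states.
def _example_compute_pi_fhmm():
    pi_appliance_1 = np.array([0.9, 0.1])   # hypothetical 2-state appliance
    pi_appliance_2 = np.array([0.8, 0.2])   # hypothetical 2-state appliance
    # np.kron([0.9, 0.1], [0.8, 0.2]) == [0.72, 0.18, 0.08, 0.02],
    # one probability per combination of (state_1, state_2)
    return compute_pi_fhmm([pi_appliance_1, pi_appliance_2])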
def create_combined_hmm(model):
list_pi = [model[appliance].startprob_ for appliance in model]
list_A = [model[appliance].transmat_ for appliance in model]
list_means = [model[appliance].means_.flatten().tolist()
for appliance in model]
pi_combined = compute_pi_fhmm(list_pi)
A_combined = compute_A_fhmm(list_A)
[mean_combined, cov_combined] = compute_means_fhmm(list_means)
combined_model = hmm.GaussianHMM(n_components=len(pi_combined), covariance_type='full')
combined_model.startprob_ = pi_combined
combined_model.transmat_ = A_combined
combined_model.covars_ = cov_combined
combined_model.means_ = mean_combined
return combined_model
def return_sorting_mapping(means):
means_copy = deepcopy(means)
means_copy = np.sort(means_copy, axis=0)
# Finding mapping
mapping = {}
for i, val in enumerate(means_copy):
mapping[i] = np.where(val == means)[0][0]
return mapping
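# Worked example (added; the means are hypothetical): for power means
# [[30.], [10.], [20.]] the ascending order is 10, 20, 30, so the mapping is
# {0: 1, 1: 2, 2: 0}, i.e. sorted position -> index in the original array.
# sort_startprob, sort_covars and sort_transition_matrix reorder the HMM
# parameters using exactly this mapping.
def _example_return_sorting_mapping():
    means = np.array([[30.], [10.], [20.]])
    return return_sorting_mapping(means)   # -> {0: 1, 1: 2, 2: 0}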
def decode_hmm(length_sequence, centroids, appliance_list, states):
"""
Decodes the HMM state sequence
"""
hmm_states = {}
hmm_power = {}
total_num_combinations = 1
for appliance in appliance_list:
total_num_combinations *= len(centroids[appliance])
for appliance in appliance_list:
hmm_states[appliance] = np.zeros(length_sequence, dtype=np.int)
hmm_power[appliance] = np.zeros(length_sequence)
for i in range(length_sequence):
factor = total_num_combinations
for appliance in appliance_list:
            # integer (floor) division keeps the decoding exact on both
            # Python 2 and Python 3
            factor = factor // len(centroids[appliance])
            temp = int(states[i]) // factor
hmm_states[appliance][i] = temp % len(centroids[appliance])
hmm_power[appliance][i] = centroids[
appliance][hmm_states[appliance][i]]
return [hmm_states, hmm_power]
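# Worked example (added; appliance names and centroids are hypothetical):
# with two appliances having 2 and 3 centroids there are 6 joint states;
# joint state 4 decodes to state 4 // 3 = 1 of the first appliance and
# state 4 % 3 = 1 of the second one.
def _example_decode_hmm():
    centroids = {'appliance A': [0., 100.], 'appliance B': [0., 50., 200.]}
    appliance_list = ['appliance A', 'appliance B']
    # returns states {'appliance A': [1], 'appliance B': [1]} and the
    # corresponding powers {'appliance A': [100.], 'appliance B': [50.]}
    return decode_hmm(1, centroids, appliance_list, np.array([4]))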
def _check_memory(num_appliances):
"""
    Checks whether the available system memory is enough to handle the
    combined matrix of transition probabilities
"""
# Each transmat is small (usually 2x2 or 3x3) but the combined
# matrix is dense, using much more memory
# Get the approximate memory in MB
try:
# If psutil is installed, we can get the correct total
# physical memory of the system
import psutil
available_memory = psutil.virtual_memory().total >> 20
except ImportError:
# Otherwise use a crude approximation
available_memory = 16 << 10
# We use (num_appliances + 1) here to get a pessimistic approximation:
# 8 bytes * (2 ** (num_appliances + 1)) ** 2
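    # Worked example (added for clarity): 10 appliances give a pessimistic
    # estimate of 8 bytes * (2 ** 11) ** 2 = 32 MB, while 20 appliances
    # would already need 8 bytes * (2 ** 21) ** 2 = 32 TB.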
required_memory = ((1 << (2 * (num_appliances + 1))) << 3) >> 20
if required_memory >= available_memory:
warn("The required memory for the model may be more than the total system memory!"
" Try using fewer appliances if the training fails.")
class FHMM(Disaggregator):
"""
Attributes
----------
model : dict
predictions : pd.DataFrame()
meters : list
MIN_CHUNK_LENGTH : int
"""
def __init__(self):
self.model = {}
self.predictions = pd.DataFrame()
self.MIN_CHUNK_LENGTH = 100
self.MODEL_NAME = 'FHMM'
def train_across_buildings(self, ds, list_of_buildings, list_of_appliances,
min_activation=0.05, **load_kwargs):
"""
:param ds: nilmtk.Dataset
:param list_of_buildings: List of buildings to use for training
:param list_of_appliances: List of appliances (nilm-metadata names)
:param min_activation: Minimum activation (in fraction) to use a home in training
:param load_kwargs:
:return:
"""
_check_memory(len(list_of_appliances))
self.list_of_appliances = list_of_appliances
models = {}
for appliance in list_of_appliances:
print("Training for", appliance)
o = []
for building_num in list_of_buildings:
building = ds.buildings[building_num]
elec = building.elec
try:
df = next(elec[appliance].load(**load_kwargs)).squeeze()
appl_power = df.dropna().values.reshape(-1, 1)
activation = (df > 10).sum() * 1.0 / len(df)
if activation > min_activation:
o.append(appl_power)
except:
pass
if len(o) > 1:
o = np.array(o)
mod = hmm.GaussianHMM(2, "full")
mod.fit(o)
models[appliance] = mod
print("Means for %s are" % appliance)
print(mod.means_)
else:
print("Not enough samples for %s" % appliance)
new_learnt_models = OrderedDict()
for appliance, appliance_model in models.items():
startprob, means, covars, transmat = sort_learnt_parameters(
appliance_model.startprob_, appliance_model.means_,
appliance_model.covars_, appliance_model.transmat_)
            new_learnt_models[appliance] = hmm.GaussianHMM(
                startprob.size, "full")
            new_learnt_models[appliance].startprob_ = startprob
            new_learnt_models[appliance].transmat_ = transmat
            new_learnt_models[appliance].means_ = means
            new_learnt_models[appliance].covars_ = covars
learnt_model_combined = create_combined_hmm(new_learnt_models)
self.individual = new_learnt_models
self.model = learnt_model_combined
self.meters = [nilmtk.global_meter_group.select_using_appliances(type=appliance).meters[0]
for appliance in self.individual.keys()]
def train(self, metergroup, num_states_dict={}, **load_kwargs):
"""Train using 1d FHMM.
Places the learnt model in `model` attribute
The current version performs training ONLY on the first chunk.
Online HMMs are welcome if someone can contribute :)
Assumes all pre-processing has been done.
"""
learnt_model = OrderedDict()
num_meters = len(metergroup.meters)
if num_meters > 12:
max_num_clusters = 2
else:
max_num_clusters = 3
_check_memory(len((metergroup.submeters().meters)))
for i, meter in enumerate(metergroup.submeters().meters):
power_series = meter.power_series(**load_kwargs)
meter_data = next(power_series).dropna()
X = meter_data.values.reshape((-1, 1))
if not len(X):
print("Submeter '{}' has no samples, skipping...".format(meter))
continue
assert X.ndim == 2
self.X = X
num_total_states = None
            # Check if the user has specified the number of states for this meter
num_total_states = num_states_dict.get(meter)
# If not, check if the number of states for the appliances was specified
if num_total_states is None:
num_apps_states = []
for appliance in meter.appliances:
num_app_state = num_states_dict.get(appliance)
if num_app_state is None:
num_app_state = num_states_dict.get(appliance.identifier.type)
if num_app_state is not None:
num_apps_states.append(num_app_state)
if num_apps_states:
num_total_states = sum(num_apps_states)
            if num_states_dict.get(meter) is not None:
                # The user has specified the number of states for this meter
                num_total_states = num_states_dict.get(meter)
# Otherwise, find the optimum number of states via clustering
if num_total_states is None:
states = cluster(meter_data, max_num_clusters)
num_total_states = len(states)
print("Training model for submeter '{}' with {} states".format(meter, num_total_states))
learnt_model[meter] = hmm.GaussianHMM(num_total_states, "full")
# Fit
learnt_model[meter].fit(X)
# Check to see if there are any more chunks.
# TODO handle multiple chunks per appliance.
try:
next(power_series)
except StopIteration:
pass
else:
warn("The current implementation of FHMM"
" can only handle a single chunk. But there are multiple"
" chunks available. So have only trained on the"
" first chunk!")
        # Combine the individual HMMs into the factorial HMM (FHMM)
self.meters = []
new_learnt_models = OrderedDict()
for meter in learnt_model:
startprob, means, covars, transmat = sort_learnt_parameters(
learnt_model[meter].startprob_, learnt_model[meter].means_,
learnt_model[meter].covars_, learnt_model[meter].transmat_)
new_learnt_models[meter] = hmm.GaussianHMM(startprob.size, "full")
new_learnt_models[meter].startprob_ = startprob
new_learnt_models[meter].transmat_ = transmat
new_learnt_models[meter].means_ = means
new_learnt_models[meter].covars_ = covars
# UGLY! But works.
self.meters.append(meter)
learnt_model_combined = create_combined_hmm(new_learnt_models)
self.individual = new_learnt_models
self.model = learnt_model_combined
def disaggregate_chunk(self, test_mains):
"""Disaggregate the test data according to the model learnt previously
Performs 1D FHMM disaggregation.
For now assuming there is no missing data at this stage.
"""
# See v0.1 code
# for ideas of how to handle missing data in this code if needs be.
# Array of learnt states
learnt_states_array = []
test_mains = test_mains.dropna()
length = len(test_mains.index)
temp = test_mains.values.reshape(length, 1)
learnt_states_array.append(self.model.predict(temp))
# Model
means = OrderedDict()
for elec_meter, model in self.individual.items():
means[elec_meter] = (
model.means_.round().astype(int).flatten().tolist())
means[elec_meter].sort()
decoded_power_array = []
decoded_states_array = []
for learnt_states in learnt_states_array:
[decoded_states, decoded_power] = decode_hmm(
len(learnt_states), means, means.keys(), learnt_states)
decoded_states_array.append(decoded_states)
decoded_power_array.append(decoded_power)
prediction = pd.DataFrame(
decoded_power_array[0], index=test_mains.index)
return prediction
def disaggregate(self, mains, output_datastore, **load_kwargs):
'''Disaggregate mains according to the model learnt previously.
Parameters
----------
mains : nilmtk.ElecMeter or nilmtk.MeterGroup
output_datastore : instance of nilmtk.DataStore subclass
For storing power predictions from disaggregation algorithm.
sample_period : number, optional
The desired sample period in seconds.
**load_kwargs : key word arguments
Passed to `mains.power_series(**kwargs)`
'''
load_kwargs = self._pre_disaggregation_checks(load_kwargs)
load_kwargs.setdefault('sample_period', 60)
load_kwargs.setdefault('sections', mains.good_sections())
timeframes = []
building_path = '/building{}'.format(mains.building())
mains_data_location = building_path + '/elec/meter1'
data_is_available = False
for chunk in mains.power_series(**load_kwargs):
# Check that chunk is sensible size before resampling
if len(chunk) < self.MIN_CHUNK_LENGTH:
continue
# Record metadata
timeframes.append(chunk.timeframe)
measurement = chunk.name
# Start disaggregation
predictions = self.disaggregate_chunk(chunk)
for meter in predictions.columns:
meter_instance = meter.instance()
cols = pd.MultiIndex.from_tuples([chunk.name])
predicted_power = predictions[[meter]]
if len(predicted_power) == 0:
continue
data_is_available = True
output_df = pd.DataFrame(predicted_power)
output_df.columns = pd.MultiIndex.from_tuples([chunk.name])
key = '{}/elec/meter{}'.format(building_path, meter_instance)
output_datastore.append(key, output_df)
# Copy mains data to disag output
output_datastore.append(key=mains_data_location,
value=pd.DataFrame(chunk, columns=cols))
if data_is_available:
self._save_metadata_for_disaggregation(
output_datastore=output_datastore,
sample_period=load_kwargs['sample_period'],
measurement=measurement,
timeframes=timeframes,
building=mains.building(),
meters=self.meters
)
def disaggregate_across_buildings(self, ds, output_datastore, list_of_buildings, **load_kwargs):
"""
        :param ds: nilmtk.DataSet containing the buildings to disaggregate
        :param output_datastore: instance of a nilmtk.DataStore subclass in
            which the power predictions are stored
        :param list_of_buildings: list of building numbers to disaggregate
        :return: None; predictions are written to `output_datastore`
"""
def get_meter_instance(ds, building_num, appliance):
elec = ds.buildings[building_num].elec
meters = elec.submeters().meters
for meter in meters:
if meter.appliances[0].type['type'] == appliance:
return meter.instance()
return -1
for building in list_of_buildings:
print("Disaggregating for building %d" % building)
mains = ds.buildings[building].elec.mains()
load_kwargs = self._pre_disaggregation_checks(load_kwargs)
load_kwargs.setdefault('sample_period', 60)
load_kwargs.setdefault('sections', mains.good_sections())
timeframes = []
building_path = '/building{}'.format(mains.building())
mains_data_location = building_path + '/elec/meter1'
data_is_available = False
building_elec = ds.buildings[building].elec
self.meters = []
for appliance in self.list_of_appliances:
m_instance = get_meter_instance(ds, building, appliance)
if m_instance != -1:
self.meters.append(building_elec[m_instance])
else:
pass
for chunk in mains.power_series(**load_kwargs):
# Check that chunk is sensible size before resampling
if len(chunk) < self.MIN_CHUNK_LENGTH:
continue
# Record metadata
timeframes.append(chunk.timeframe)
measurement = chunk.name
# Start disaggregation
predictions = self.disaggregate_chunk(chunk)
for meter in predictions.columns:
if type(meter) is str:
# training done across homes
meter_instance = get_meter_instance(ds, building, meter)
if meter_instance == -1:
continue
else:
meter_instance = meter.instance()
cols = pd.MultiIndex.from_tuples([chunk.name])
predicted_power = predictions[[meter]]
if len(predicted_power) == 0:
continue
data_is_available = True
output_df = pd.DataFrame(predicted_power)
output_df.columns = pd.MultiIndex.from_tuples([chunk.name])
key = '{}/elec/meter{}'.format(building_path, meter_instance)
output_datastore.append(key, output_df)
# Copy mains data to disag output
output_datastore.append(key=mains_data_location,
value=pd.DataFrame(chunk, columns=cols, dtype='float32'))
if data_is_available:
self._save_metadata_for_disaggregation(
output_datastore=output_datastore,
sample_period=load_kwargs['sample_period'],
measurement=measurement,
timeframes=timeframes,
building=mains.building(),
meters=self.meters
)
def import_model(self, filename):
with open(filename, 'rb') as in_file:
imported_model = pickle.load(in_file)
self.model = imported_model.model
self.individual = imported_model.individual
# Recreate datastores from filenames
for meter in self.individual.keys():
store_filename = meter.store
meter.store = HDFDataStore(store_filename)
self.meters = list(self.individual.keys())
def export_model(self, filename):
# Can't pickle datastore, so convert to filenames
original_stores = []
meters = self.meters
self.meters = None
for meter in self.individual.keys():
original_store = meter.store
original_stores.append(original_store)
meter.store = original_store.store.filename
try:
with open(filename, 'wb') as out_file:
pickle.dump(self, out_file)
finally:
# Restore the meters and stores even if the pickling fails
for original_store, meter in zip(original_stores, self.individual.keys()):
meter.store = original_store
self.meters = meters
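# Minimal usage sketch (added for illustration; the dataset path, output path
# and building number are hypothetical and depend on the data available):
def _example_fhmm_usage(dataset_path, output_path, building=1):
    ds = nilmtk.DataSet(dataset_path)
    elec = ds.buildings[building].elec
    fhmm = FHMM()
    fhmm.train(elec.submeters(), sample_period=60)
    output = HDFDataStore(output_path, 'w')
    fhmm.disaggregate(elec.mains(), output, sample_period=60)
    output.close()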
| apache-2.0 |
massmutual/pystruct | examples/multiclass_comparision_svm_struct.py | 4 | 6536 | """
=================================
Comparing PyStruct and SVM-Struct
=================================
This example compares the performance of pystruct and SVM^struct on a
multi-class problem.
For the example to work, you need to install SVM^multiclass and
set the path in this file.
We are not using SVM^python, as that would be much slower, and we would
need to implement our own model in a SVM^python compatible way.
Instead, we just call the SVM^multiclass binary.
This comparison is only meaningful in the sense that both libraries
use general structured prediction solvers to solve the task.
The specialized implementation of the Crammer-Singer SVM in LibLinear
is much faster than either one.
For SVM^struct, the plot shows CPU time as reported by SVM^struct.
For pystruct, the plot shows the time spent in the fit function
according to time.clock.
Both models have disabled constraint caching. With constraint caching,
SVM^struct is somewhat faster, but PyStruct doesn't gain anything.
"""
import tempfile
import os
from time import clock
import numpy as np
from sklearn.datasets import dump_svmlight_file
from sklearn.datasets import fetch_mldata, load_iris, load_digits
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
from pystruct.models import MultiClassClf
from pystruct.learners import OneSlackSSVM
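# Reference baseline (added sketch, not part of the original benchmark): the
# LibLinear Crammer-Singer SVM mentioned in the module docstring is available
# through scikit-learn; the helper assumes an arbitrary train/test split.
def crammer_singer_baseline(X_train, y_train, X_test, y_test, C=1.):
    from sklearn.svm import LinearSVC
    clf = LinearSVC(multi_class='crammer_singer', C=C)
    clf.fit(X_train, y_train)
    return clf.score(X_test, y_test)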
# please set the path to the svm-struct multiclass binaries here
svmstruct_path = "/home/user/amueller/tools/svm_multiclass/"
class MultiSVM():
"""scikit-learn compatible interface for SVM^multi.
Dumps the data to a file and calls the binary.
"""
def __init__(self, C=1.):
self.C = C
def fit(self, X, y):
self.model_file = tempfile.mktemp(suffix='.svm')
train_data_file = tempfile.mktemp(suffix='.svm_dat')
dump_svmlight_file(X, y + 1, train_data_file, zero_based=False)
C = self.C * 100. * len(X)
svmstruct_process = os.popen(svmstruct_path
+ "svm_multiclass_learn -w 3 -c %f %s %s"
% (C, train_data_file, self.model_file))
self.output_ = svmstruct_process.read().split("\n")
self.runtime_ = float(self.output_[-4].split(":")[1])
def _predict(self, X, y=None):
if y is None:
y = np.ones(len(X))
train_data_file = tempfile.mktemp(suffix='.svm_dat')
dump_svmlight_file(X, y, train_data_file, zero_based=False)
prediction_file = tempfile.mktemp(suffix='.out')
os.system(svmstruct_path + "svm_multiclass_classify %s %s %s"
% (train_data_file, self.model_file, prediction_file))
return np.loadtxt(prediction_file)
def predict(self, X):
return self._predict(X)[:, 0] - 1
def score(self, X, y):
y_pred = self.predict(X)
return accuracy_score(y, y_pred)
def decision_function(self, X):
return self._predict(X)[:, 1:]
def eval_on_data(X_train, y_train, X_test, y_test, svm, Cs):
# evaluate a single svm using varying C
accuracies, times = [], []
for C in Cs:
svm.C = C
start = clock()
svm.fit(X_train, y_train)
if hasattr(svm, "runtime_"):
times.append(svm.runtime_)
else:
times.append(clock() - start)
accuracies.append(accuracy_score(y_test, svm.predict(X_test)))
return accuracies, times
def plot_curves(curve_svmstruct, curve_pystruct, Cs, title="", filename=""):
# plot nice graphs comparing a value for the two implementations
plt.figure(figsize=(7, 4))
plt.plot(curve_svmstruct, "--", label="SVM^struct", c='red', linewidth=3)
plt.plot(curve_pystruct, "-.", label="PyStruct", c='blue', linewidth=3)
plt.xlabel("C")
plt.xticks(np.arange(len(Cs)), Cs)
plt.legend(loc='best')
plt.title(title)
if filename:
plt.savefig("%s" % filename, bbox_inches='tight')
def do_comparison(X_train, y_train, X_test, y_test, dataset):
# evaluate both svms on a given datasets, generate plots
Cs = 10. ** np.arange(-4, 1)
multisvm = MultiSVM()
svm = OneSlackSSVM(MultiClassClf(), tol=0.01)
accs_pystruct, times_pystruct = eval_on_data(X_train, y_train, X_test,
y_test, svm, Cs=Cs)
accs_svmstruct, times_svmstruct = eval_on_data(X_train, y_train,
X_test, y_test,
multisvm, Cs=Cs)
plot_curves(times_svmstruct, times_pystruct, Cs=Cs,
title="learning time (s) %s" % dataset,
filename="times_%s.pdf" % dataset)
plot_curves(accs_svmstruct, accs_pystruct, Cs=Cs,
title="accuracy %s" % dataset,
filename="accs_%s.pdf" % dataset)
def main():
if not os.path.exists(svmstruct_path + "svm_multiclass_learn"):
print("Please install SVM^multi and set the svmstruct_path variable "
"to run this example.")
return
datasets = ['iris', 'digits']
#datasets = ['iris', 'digits', 'usps', 'mnist']
# IRIS
if 'iris' in datasets:
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
do_comparison(X_train, y_train, X_test, y_test, "iris")
# DIGITS
if 'digits' in datasets:
digits = load_digits()
X, y = digits.data / 16., digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
do_comparison(X_train, y_train, X_test, y_test, "digits")
# USPS
if 'usps' in datasets:
digits = fetch_mldata("USPS")
X, y = digits.data, digits.target.astype(np.int) - 1
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
do_comparison(X_train, y_train, X_test, y_test, "USPS")
# MNIST
if 'mnist' in datasets:
digits = fetch_mldata("MNIST original")
X, y = digits.data / 255., digits.target.astype(np.int)
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
do_comparison(X_train, y_train, X_test, y_test, "MNIST")
plt.show()
if __name__ == "__main__":
main()
| bsd-2-clause |
HolgerPeters/scikit-learn | examples/plot_digits_pipe.py | 65 | 1652 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
caidongyun/BuildingMachineLearningSystemsWithPython | ch06/utils.py | 22 | 6937 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import sys
import collections
import csv
import json
from matplotlib import pylab
import numpy as np
DATA_DIR = "data"
CHART_DIR = "charts"
if not os.path.exists(DATA_DIR):
raise RuntimeError("Expecting directory 'data' in current path")
if not os.path.exists(CHART_DIR):
os.mkdir(CHART_DIR)
def tweak_labels(Y, pos_sent_list):
pos = Y == pos_sent_list[0]
for sent_label in pos_sent_list[1:]:
pos |= Y == sent_label
Y = np.zeros(Y.shape[0])
Y[pos] = 1
Y = Y.astype(int)
return Y
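# Worked example (added; the labels are hypothetical): with
# Y = np.array(["positive", "negative", "neutral", "positive"]) and
# pos_sent_list = ["positive"], tweak_labels returns array([1, 0, 0, 1]),
# i.e. a binary target for a "positive vs. rest" classifier.
def _example_tweak_labels():
    Y = np.array(["positive", "negative", "neutral", "positive"])
    return tweak_labels(Y, ["positive"])   # -> array([1, 0, 0, 1])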
def load_sanders_data(dirname=".", line_count=-1):
count = 0
topics = []
labels = []
tweets = []
with open(os.path.join(DATA_DIR, dirname, "corpus.csv"), "r") as csvfile:
metareader = csv.reader(csvfile, delimiter=',', quotechar='"')
for line in metareader:
count += 1
if line_count > 0 and count > line_count:
break
topic, label, tweet_id = line
tweet_fn = os.path.join(
DATA_DIR, dirname, 'rawdata', '%s.json' % tweet_id)
try:
tweet = json.load(open(tweet_fn, "r"))
except IOError:
print(("Tweet '%s' not found. Skip." % tweet_fn))
continue
if 'text' in tweet and tweet['user']['lang'] == "en":
topics.append(topic)
labels.append(label)
tweets.append(tweet['text'])
tweets = np.asarray(tweets)
labels = np.asarray(labels)
return tweets, labels
def plot_pr(auc_score, name, phase, precision, recall, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.fill_between(recall, precision, alpha=0.5)
pylab.plot(recall, precision, lw=1)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('Recall')
pylab.ylabel('Precision')
pylab.title('P/R curve (AUC=%0.2f) / %s' % (auc_score, label))
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(CHART_DIR, "pr_%s_%s.png" %
(filename, phase)), bbox_inches="tight")
def show_most_informative_features(vectorizer, clf, n=20):
c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
top = list(zip(c_f[:n], c_f[:-(n + 1):-1]))
for (c1, f1), (c2, f2) in top:
print("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
def plot_log():
pylab.clf()
pylab.figure(num=None, figsize=(6, 5))
x = np.arange(0.001, 1, 0.001)
y = np.log(x)
pylab.title('Relationship between probabilities and their logarithm')
pylab.plot(x, y)
pylab.grid(True)
pylab.xlabel('P')
pylab.ylabel('log(P)')
filename = 'log_probs.png'
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_feat_importance(feature_names, clf, name):
pylab.clf()
coef_ = clf.coef_
important = np.argsort(np.absolute(coef_.ravel()))
f_imp = feature_names[important]
coef = coef_.ravel()[important]
inds = np.argsort(coef)
f_imp = f_imp[inds]
coef = coef[inds]
xpos = np.array(list(range(len(coef))))
pylab.bar(xpos, coef, width=1)
pylab.title('Feature importance for %s' % (name))
ax = pylab.gca()
ax.set_xticks(np.arange(len(coef)))
labels = ax.set_xticklabels(f_imp)
for label in labels:
label.set_rotation(90)
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(
CHART_DIR, "feat_imp_%s.png" % filename), bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
pylab.clf()
    num_rows = 1 + (len(data_name_list) - 1) // 2
num_cols = 1 if len(data_name_list) == 1 else 2
pylab.figure(figsize=(5 * num_cols, 4 * num_rows))
for i in range(num_rows):
for j in range(num_cols):
pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
x, name = data_name_list[i * num_cols + j]
pylab.title(name)
pylab.xlabel('Value')
pylab.ylabel('Density')
# the histogram of the data
max_val = np.max(x)
if max_val <= 1.0:
bins = 50
elif max_val > 50:
bins = 50
else:
bins = max_val
n, bins, patches = pylab.hist(
x, bins=bins, normed=1, facecolor='green', alpha=0.75)
pylab.grid(True)
if not filename:
filename = "feat_hist_%s.png" % name
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name):
pylab.clf()
pylab.ylim([0.0, 1.0])
pylab.xlabel('Data set size')
pylab.ylabel('Error')
pylab.title("Bias-Variance for '%s'" % name)
pylab.plot(
data_sizes, train_errors, "-", data_sizes, test_errors, "--", lw=1)
pylab.legend(["train error", "test error"], loc="upper right")
pylab.grid()
pylab.savefig(os.path.join(CHART_DIR, "bv_" + name + ".png"))
def load_sent_word_net():
sent_scores = collections.defaultdict(list)
sentiwordnet_path = os.path.join(DATA_DIR, "SentiWordNet_3.0.0_20130122.txt")
if not os.path.exists(sentiwordnet_path):
print("Please download SentiWordNet_3.0.0 from http://sentiwordnet.isti.cnr.it/download.php, extract it and put it into the data directory")
sys.exit(1)
with open(sentiwordnet_path, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='"')
for line in reader:
if line[0].startswith("#"):
continue
if len(line) == 1:
continue
POS, ID, PosScore, NegScore, SynsetTerms, Gloss = line
if len(POS) == 0 or len(ID) == 0:
continue
# print POS,PosScore,NegScore,SynsetTerms
for term in SynsetTerms.split(" "):
# drop #number at the end of every term
term = term.split("#")[0]
term = term.replace("-", " ").replace("_", " ")
key = "%s/%s" % (POS, term.split("#")[0])
sent_scores[key].append((float(PosScore), float(NegScore)))
for key, value in sent_scores.items():
sent_scores[key] = np.mean(value, axis=0)
return sent_scores
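# Resulting structure (for reference): sent_scores is keyed by "POS/term",
# e.g. "a/good", and each value is the mean np.array([PosScore, NegScore])
# over all synsets containing that term.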
def log_false_positives(clf, X, y, name):
with open("FP_" + name.replace(" ", "_") + ".tsv", "w") as f:
false_positive = clf.predict(X) != y
for tweet, false_class in zip(X[false_positive], y[false_positive]):
f.write("%s\t%s\n" %
(false_class, tweet.encode("ascii", "ignore")))
if __name__ == '__main__':
plot_log()
| mit |
xavierwu/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distance matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that a ValueError is raised when the connectivity
        # matrix has the wrong shape
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
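# A minimal illustrative sketch (not part of the original test suite): it
# exercises assess_same_labelling on two labelings that differ only by a
# permutation of the label values, which is exactly the invariance the
# scipy comparison below relies on. The label arrays here are made up.
def test_assess_same_labelling_permutation_invariance():
    cut1 = np.array([0, 0, 1, 1, 2, 2])
    cut2 = np.array([2, 2, 0, 0, 1, 1])  # same partition, relabelled
    assess_same_labelling(cut1, cut2)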
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Check non-regression of a bug when a connectivity matrix that does not
    # support item assignment is provided with more than one connected component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
    # When n_clusters is small, the full tree should be built,
    # that is, the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than the max of 100 and 0.02 * n_samples),
    # tree building should stop early, once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
rkmaddox/mne-python | mne/preprocessing/maxwell.py | 3 | 103429 | # -*- coding: utf-8 -*-
# Authors: Mark Wronkiewicz <[email protected]>
# Eric Larson <[email protected]>
# Jussi Nurminen <[email protected]>
# License: BSD (3-clause)
from collections import Counter, OrderedDict
from functools import partial
from math import factorial
from os import path as op
import numpy as np
from .. import __version__
from ..annotations import _annotations_starts_stops
from ..bem import _check_origin
from ..transforms import (_str_to_frame, _get_trans, Transform, apply_trans,
_find_vector_rotation, _cart_to_sph, _get_n_moments,
_sph_to_cart_partials, _deg_ord_idx, _average_quats,
_sh_complex_to_real, _sh_real_to_complex, _sh_negate,
quat_to_rot, rot_to_quat)
from ..forward import _concatenate_coils, _prep_meg_channels, _create_meg_coils
from ..surface import _normalize_vectors
from ..io.constants import FIFF, FWD
from ..io.meas_info import _simplify_info, Info
from ..io.proc_history import _read_ctc
from ..io.write import _generate_meas_id, DATE_NONE
from ..io import (_loc_to_coil_trans, _coil_trans_to_loc, BaseRaw, RawArray,
Projection)
from ..io.pick import pick_types, pick_info
from ..utils import (verbose, logger, _clean_names, warn, _time_mask, _pl,
_check_option, _ensure_int, _validate_type, use_log_level)
from ..fixes import _safe_svd, bincount
from ..channels.channels import _get_T1T2_mag_inds, fix_mag_coil_types
# Note: MF uses single precision and some algorithms might use
# truncated versions of constants (e.g., μ0), which could lead to small
# differences between algorithms
# Changes to arguments here should also be made in find_bad_channels_maxwell
@verbose
def maxwell_filter(raw, origin='auto', int_order=8, ext_order=3,
calibration=None, cross_talk=None, st_duration=None,
st_correlation=0.98, coord_frame='head', destination=None,
regularize='in', ignore_ref=False, bad_condition='error',
head_pos=None, st_fixed=True, st_only=False, mag_scale=100.,
skip_by_annotation=('edge', 'bad_acq_skip'),
extended_proj=(), verbose=None):
"""Maxwell filter data using multipole moments.
Parameters
----------
raw : instance of mne.io.Raw
Data to be filtered.
.. warning:: It is critical to mark bad channels in
``raw.info['bads']`` prior to processing in order to
prevent artifact spreading. Manual inspection and use
of :func:`~find_bad_channels_maxwell` is recommended.
%(maxwell_origin)s
%(maxwell_int)s
%(maxwell_ext)s
%(maxwell_cal)s
%(maxwell_cross)s
st_duration : float | None
If not None, apply spatiotemporal SSS with specified buffer duration
(in seconds). MaxFilter™'s default is 10.0 seconds in v2.2.
        Spatiotemporal SSS acts implicitly as a high-pass filter whose
        cut-off frequency is 1/st_duration Hz. For this (and other) reasons,
longer buffers are generally better as long as your system can handle
the higher memory usage. To ensure that each window is processed
identically, choose a buffer length that divides evenly into your data.
Any data at the trailing edge that doesn't fit evenly into a whole
buffer window will be lumped into the previous buffer.
st_correlation : float
Correlation limit between inner and outer subspaces used to reject
        overlapping inner/outer signals during spatiotemporal SSS.
%(maxwell_coord)s
%(maxwell_dest)s
%(maxwell_reg)s
%(maxwell_ref)s
%(maxwell_cond)s
%(maxwell_pos)s
.. versionadded:: 0.12
%(maxwell_st_fixed_only)s
%(maxwell_mag)s
.. versionadded:: 0.13
%(maxwell_skip)s
.. versionadded:: 0.17
%(maxwell_extended)s
%(verbose)s
Returns
-------
raw_sss : instance of mne.io.Raw
The raw data with Maxwell filtering applied.
See Also
--------
mne.preprocessing.annotate_flat
mne.preprocessing.find_bad_channels_maxwell
mne.chpi.filter_chpi
mne.chpi.read_head_pos
mne.epochs.average_movements
Notes
-----
.. versionadded:: 0.11
Some of this code was adapted and relicensed (with BSD form) with
permission from Jussi Nurminen. These algorithms are based on work
from :footcite:`TauluKajola2005` and :footcite:`TauluSimola2006`.
It will likely use multiple CPU cores, see the :ref:`FAQ <faq_cpu>`
for more information.
.. warning:: Maxwell filtering in MNE is not designed or certified
for clinical use.
Compared to the MEGIN MaxFilter™ software, the MNE Maxwell filtering
routines currently provide the following features:
.. table::
:widths: auto
+-----------------------------------------------------------------------------+-----+-----------+
| Feature | MNE | MaxFilter |
+=============================================================================+=====+===========+
| Maxwell filtering software shielding | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Bad channel reconstruction | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Cross-talk cancellation | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Fine calibration correction (1D) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Fine calibration correction (3D) | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Spatio-temporal SSS (tSSS) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Coordinate frame translation | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Regularization using information theory | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Movement compensation (raw) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Movement compensation (:func:`epochs <mne.epochs.average_movements>`) | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| :func:`cHPI subtraction <mne.chpi.filter_chpi>` | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Double floating point precision | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Seamless processing of split (``-1.fif``) and concatenated files | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Automatic bad channel detection (:func:`~find_bad_channels_maxwell`) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Head position estimation (:func:`~mne.chpi.compute_head_pos`) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Certified for clinical use | | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Extended external basis (eSSS) | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
Epoch-based movement compensation is described in :footcite:`TauluKajola2005`.
Use of Maxwell filtering routines with non-Neuromag systems is currently
**experimental**. Worse results for non-Neuromag systems are expected due
to (at least):
* Missing fine-calibration and cross-talk cancellation data for
other systems.
* Processing with reference sensors has not been vetted.
* Regularization of components may not work well for all systems.
* Coil integration has not been optimized using Abramowitz/Stegun
definitions.
.. note:: Various Maxwell filtering algorithm components are covered by
patents owned by MEGIN. These patents include, but may not be
limited to:
- US2006031038 (Signal Space Separation)
- US6876196 (Head position determination)
- WO2005067789 (DC fields)
- WO2005078467 (MaxShield)
- WO2006114473 (Temporal Signal Space Separation)
These patents likely preclude the use of Maxwell filtering code
in commercial applications. Consult a lawyer if necessary.
Currently, in order to perform Maxwell filtering, the raw data must not
have any projectors applied. During Maxwell filtering, the spatial
structure of the data is modified, so projectors are discarded (unless
in ``st_only=True`` mode).
References
----------
.. footbibliography::
""" # noqa: E501
logger.info('Maxwell filtering raw data')
params = _prep_maxwell_filter(
raw=raw, origin=origin, int_order=int_order, ext_order=ext_order,
calibration=calibration, cross_talk=cross_talk,
st_duration=st_duration, st_correlation=st_correlation,
coord_frame=coord_frame, destination=destination,
regularize=regularize, ignore_ref=ignore_ref,
bad_condition=bad_condition, head_pos=head_pos, st_fixed=st_fixed,
st_only=st_only, mag_scale=mag_scale,
skip_by_annotation=skip_by_annotation, extended_proj=extended_proj)
raw_sss = _run_maxwell_filter(raw, **params)
# Update info
_update_sss_info(raw_sss, **params['update_kwargs'])
logger.info('[done]')
return raw_sss
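# A minimal usage sketch (not part of this module's API): it shows the call
# pattern documented above -- mark bad channels first, then run SSS or tSSS.
# The file path and bad-channel name below are hypothetical placeholders.
def _sketch_maxwell_filter_usage():
    from ..io import read_raw_fif
    raw = read_raw_fif('sample_raw.fif')   # hypothetical recording
    raw.info['bads'] = ['MEG 2443']        # mark bad channels before filtering
    raw_sss = maxwell_filter(raw)          # plain SSS with default orders
    raw_tsss = maxwell_filter(raw, st_duration=10.)  # tSSS with a 10 s buffer
    return raw_sss, raw_tsss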
@verbose
def _prep_maxwell_filter(
raw, origin='auto', int_order=8, ext_order=3,
calibration=None, cross_talk=None, st_duration=None,
st_correlation=0.98, coord_frame='head', destination=None,
regularize='in', ignore_ref=False, bad_condition='error',
head_pos=None, st_fixed=True, st_only=False,
mag_scale=100.,
skip_by_annotation=('edge', 'bad_acq_skip'), extended_proj=(),
reconstruct='in', verbose=None):
# There are an absurd number of different possible notations for spherical
# coordinates, which confounds the notation for spherical harmonics. Here,
# we purposefully stay away from shorthand notation in both and use
# explicit terms (like 'azimuth' and 'polar') to avoid confusion.
# See mathworld.wolfram.com/SphericalHarmonic.html for more discussion.
# Our code follows the same standard that ``scipy`` uses for ``sph_harm``.
# triage inputs ASAP to avoid late-thrown errors
_validate_type(raw, BaseRaw, 'raw')
_check_usable(raw)
_check_regularize(regularize)
st_correlation = float(st_correlation)
if st_correlation <= 0. or st_correlation > 1.:
raise ValueError('Need 0 < st_correlation <= 1., got %s'
% st_correlation)
_check_option('coord_frame', coord_frame, ['head', 'meg'])
head_frame = True if coord_frame == 'head' else False
recon_trans = _check_destination(destination, raw.info, head_frame)
if st_duration is not None:
st_duration = float(st_duration)
st_correlation = float(st_correlation)
st_duration = int(round(st_duration * raw.info['sfreq']))
if not 0. < st_correlation <= 1:
raise ValueError('st_correlation must be between 0. and 1.')
_check_option('bad_condition', bad_condition,
['error', 'warning', 'ignore', 'info'])
if raw.info['dev_head_t'] is None and coord_frame == 'head':
raise RuntimeError('coord_frame cannot be "head" because '
'info["dev_head_t"] is None; if this is an '
'empty room recording, consider using '
'coord_frame="meg"')
if st_only and st_duration is None:
raise ValueError('st_duration must not be None if st_only is True')
head_pos = _check_pos(head_pos, head_frame, raw, st_fixed,
raw.info['sfreq'])
_check_info(raw.info, sss=not st_only, tsss=st_duration is not None,
calibration=not st_only and calibration is not None,
ctc=not st_only and cross_talk is not None)
# Now we can actually get moving
info = raw.info.copy()
meg_picks, mag_picks, grad_picks, good_mask, mag_or_fine = \
_get_mf_picks_fix_mags(info, int_order, ext_order, ignore_ref)
# Magnetometers are scaled to improve numerical stability
coil_scale, mag_scale = _get_coil_scale(
meg_picks, mag_picks, grad_picks, mag_scale, info)
#
# Extended projection vectors
#
_validate_type(extended_proj, (list, tuple), 'extended_proj')
good_names = [info['ch_names'][c] for c in meg_picks[good_mask]]
if len(extended_proj) > 0:
extended_proj_ = list()
for pi, proj in enumerate(extended_proj):
item = 'extended_proj[%d]' % (pi,)
_validate_type(proj, Projection, item)
got_names = proj['data']['col_names']
missing = sorted(set(good_names) - set(got_names))
if missing:
raise ValueError('%s channel names were missing some '
'good MEG channel names:\n%s'
% (item, ', '.join(missing)))
idx = [got_names.index(name) for name in good_names]
extended_proj_.append(proj['data']['data'][:, idx])
extended_proj = np.concatenate(extended_proj_)
logger.info(' Extending external SSS basis using %d projection '
'vectors' % (len(extended_proj),))
#
# Fine calibration processing (load fine cal and overwrite sensor geometry)
#
sss_cal = dict()
if calibration is not None:
calibration, sss_cal = _update_sensor_geometry(
info, calibration, ignore_ref)
mag_or_fine.fill(True) # all channels now have some mag-type data
# Determine/check the origin of the expansion
origin = _check_origin(origin, info, coord_frame, disp=True)
# Convert to the head frame
if coord_frame == 'meg' and info['dev_head_t'] is not None:
origin_head = apply_trans(info['dev_head_t'], origin)
else:
origin_head = origin
update_kwargs = dict(
origin=origin, coord_frame=coord_frame, sss_cal=sss_cal,
int_order=int_order, ext_order=ext_order,
extended_proj=extended_proj)
del origin, coord_frame, sss_cal
origin_head.setflags(write=False)
#
# Cross-talk processing
#
meg_ch_names = [info['ch_names'][p] for p in meg_picks]
ctc, sss_ctc = _read_cross_talk(cross_talk, meg_ch_names)
update_kwargs['sss_ctc'] = sss_ctc
del sss_ctc
#
# Translate to destination frame (always use non-fine-cal bases)
#
exp = dict(origin=origin_head, int_order=int_order, ext_order=0)
all_coils = _prep_mf_coils(info, ignore_ref)
S_recon = _trans_sss_basis(exp, all_coils, recon_trans, coil_scale)
exp['ext_order'] = ext_order
exp['extended_proj'] = extended_proj
del extended_proj
# Reconstruct data from internal space only (Eq. 38), and rescale S_recon
S_recon /= coil_scale
if recon_trans is not None:
# warn if we have translated too far
diff = 1000 * (info['dev_head_t']['trans'][:3, 3] -
recon_trans['trans'][:3, 3])
dist = np.sqrt(np.sum(_sq(diff)))
if dist > 25.:
warn('Head position change is over 25 mm (%s) = %0.1f mm'
% (', '.join('%0.1f' % x for x in diff), dist))
# Reconstruct raw file object with spatiotemporal processed data
max_st = dict()
if st_duration is not None:
if st_only:
job = FIFF.FIFFV_SSS_JOB_TPROJ
else:
job = FIFF.FIFFV_SSS_JOB_ST
max_st.update(job=job, subspcorr=st_correlation,
buflen=st_duration / info['sfreq'])
logger.info(' Processing data using tSSS with st_duration=%s'
% max_st['buflen'])
st_when = 'before' if st_fixed else 'after' # relative to movecomp
else:
# st_duration from here on will act like the chunk size
st_duration = min(max(int(round(10. * info['sfreq'])), 1),
len(raw.times))
st_correlation = None
st_when = 'never'
update_kwargs['max_st'] = max_st
del st_fixed, max_st
# Figure out which transforms we need for each tSSS block
# (and transform pos[1] to times)
head_pos[1] = raw.time_as_index(head_pos[1], use_rounding=True)
# Compute the first bit of pos_data for cHPI reporting
if info['dev_head_t'] is not None and head_pos[0] is not None:
this_pos_quat = np.concatenate([
rot_to_quat(info['dev_head_t']['trans'][:3, :3]),
info['dev_head_t']['trans'][:3, 3],
np.zeros(3)])
else:
this_pos_quat = None
_get_this_decomp_trans = partial(
_get_decomp, all_coils=all_coils,
cal=calibration, regularize=regularize,
exp=exp, ignore_ref=ignore_ref, coil_scale=coil_scale,
grad_picks=grad_picks, mag_picks=mag_picks, good_mask=good_mask,
mag_or_fine=mag_or_fine, bad_condition=bad_condition,
mag_scale=mag_scale)
update_kwargs.update(
nchan=good_mask.sum(), st_only=st_only, recon_trans=recon_trans)
params = dict(
skip_by_annotation=skip_by_annotation,
st_duration=st_duration, st_correlation=st_correlation,
st_only=st_only, st_when=st_when, ctc=ctc, coil_scale=coil_scale,
this_pos_quat=this_pos_quat, meg_picks=meg_picks,
good_mask=good_mask, grad_picks=grad_picks, head_pos=head_pos,
info=info, _get_this_decomp_trans=_get_this_decomp_trans,
S_recon=S_recon, update_kwargs=update_kwargs)
return params
def _run_maxwell_filter(
raw, skip_by_annotation, st_duration, st_correlation, st_only,
st_when, ctc, coil_scale, this_pos_quat, meg_picks, good_mask,
grad_picks, head_pos, info, _get_this_decomp_trans, S_recon,
update_kwargs,
reconstruct='in', copy=True):
# Eventually find_bad_channels_maxwell could be sped up by moving this
# outside the loop (e.g., in the prep function) but regularization depends
# on which channels are being used, so easier just to include it here.
# The time it takes to recompute S and pS themselves is roughly on par
# with the np.dot with the data, so not a huge gain to be made there.
S_decomp, S_decomp_full, pS_decomp, reg_moments, n_use_in = \
_get_this_decomp_trans(info['dev_head_t'], t=0.)
update_kwargs.update(reg_moments=reg_moments.copy())
if ctc is not None:
ctc = ctc[good_mask][:, good_mask]
add_channels = (head_pos[0] is not None) and (not st_only) and copy
raw_sss, pos_picks = _copy_preload_add_channels(
raw, add_channels, copy, info)
sfreq = info['sfreq']
del raw
if not st_only:
# remove MEG projectors, they won't apply now
_remove_meg_projs(raw_sss)
# Figure out which segments of data we can use
onsets, ends = _annotations_starts_stops(
raw_sss, skip_by_annotation, invert=True)
max_samps = (ends - onsets).max()
if not 0. < st_duration <= max_samps + 1.:
raise ValueError('st_duration (%0.1fs) must be between 0 and the '
'longest contiguous duration of the data '
'(%0.1fs).' % (st_duration / sfreq,
max_samps / sfreq))
# Generate time points to break up data into equal-length windows
starts, stops = list(), list()
for onset, end in zip(onsets, ends):
read_lims = np.arange(onset, end + 1, st_duration)
if len(read_lims) == 1:
read_lims = np.concatenate([read_lims, [end]])
if read_lims[-1] != end:
read_lims[-1] = end
# fold it into the previous buffer
n_last_buf = read_lims[-1] - read_lims[-2]
if st_correlation is not None and len(read_lims) > 2:
if n_last_buf >= st_duration:
logger.info(
                    '    Spatiotemporal window did not fit evenly into '
'contiguous data segment. %0.2f seconds were lumped '
'into the previous window.'
% ((n_last_buf - st_duration) / sfreq,))
else:
logger.info(
' Contiguous data segment of duration %0.2f '
'seconds is too short to be processed with tSSS '
'using duration %0.2f'
% (n_last_buf / sfreq, st_duration / sfreq))
assert len(read_lims) >= 2
assert read_lims[0] == onset and read_lims[-1] == end
starts.extend(read_lims[:-1])
stops.extend(read_lims[1:])
del read_lims
st_duration = min(max_samps, st_duration)
# Loop through buffer windows of data
n_sig = int(np.floor(np.log10(max(len(starts), 0)))) + 1
logger.info(
' Processing %s data chunk%s' % (len(starts), _pl(starts)))
for ii, (start, stop) in enumerate(zip(starts, stops)):
if start == stop:
continue # Skip zero-length annotations
tsss_valid = (stop - start) >= st_duration
rel_times = raw_sss.times[start:stop]
t_str = '%8.3f - %8.3f sec' % tuple(rel_times[[0, -1]])
t_str += ('(#%d/%d)' % (ii + 1, len(starts))).rjust(2 * n_sig + 5)
# Get original data
orig_data = raw_sss._data[meg_picks[good_mask], start:stop]
# This could just be np.empty if not st_only, but shouldn't be slow
# this way so might as well just always take the original data
out_meg_data = raw_sss._data[meg_picks, start:stop]
# Apply cross-talk correction
if ctc is not None:
orig_data = ctc.dot(orig_data)
out_pos_data = np.empty((len(pos_picks), stop - start))
# Figure out which positions to use
t_s_s_q_a = _trans_starts_stops_quats(head_pos, start, stop,
this_pos_quat)
n_positions = len(t_s_s_q_a[0])
# Set up post-tSSS or do pre-tSSS
if st_correlation is not None:
# If doing tSSS before movecomp...
resid = orig_data.copy() # to be safe let's operate on a copy
if st_when == 'after':
orig_in_data = np.empty((len(meg_picks), stop - start))
else: # 'before'
avg_trans = t_s_s_q_a[-1]
if avg_trans is not None:
# if doing movecomp
S_decomp_st, _, pS_decomp_st, _, n_use_in_st = \
_get_this_decomp_trans(avg_trans, t=rel_times[0])
else:
S_decomp_st, pS_decomp_st = S_decomp, pS_decomp
n_use_in_st = n_use_in
orig_in_data = np.dot(np.dot(S_decomp_st[:, :n_use_in_st],
pS_decomp_st[:n_use_in_st]),
resid)
resid -= np.dot(np.dot(S_decomp_st[:, n_use_in_st:],
pS_decomp_st[n_use_in_st:]), resid)
resid -= orig_in_data
# Here we operate on our actual data
proc = out_meg_data if st_only else orig_data
_do_tSSS(proc, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid)
if not st_only or st_when == 'after':
# Do movement compensation on the data
for trans, rel_start, rel_stop, this_pos_quat in \
zip(*t_s_s_q_a[:4]):
# Recalculate bases if necessary (trans will be None iff the
# first position in this interval is the same as last of the
# previous interval)
if trans is not None:
S_decomp, S_decomp_full, pS_decomp, reg_moments, \
n_use_in = _get_this_decomp_trans(
trans, t=rel_times[rel_start])
# Determine multipole moments for this interval
mm_in = np.dot(pS_decomp[:n_use_in],
orig_data[:, rel_start:rel_stop])
# Our output data
if not st_only:
if reconstruct == 'in':
proj = S_recon.take(reg_moments[:n_use_in], axis=1)
mult = mm_in
else:
assert reconstruct == 'orig'
proj = S_decomp_full # already picked reg
mm_out = np.dot(pS_decomp[n_use_in:],
orig_data[:, rel_start:rel_stop])
mult = np.concatenate((mm_in, mm_out))
out_meg_data[:, rel_start:rel_stop] = \
np.dot(proj, mult)
if len(pos_picks) > 0:
out_pos_data[:, rel_start:rel_stop] = \
this_pos_quat[:, np.newaxis]
# Transform orig_data to store just the residual
if st_when == 'after':
# Reconstruct data using original location from external
# and internal spaces and compute residual
rel_resid_data = resid[:, rel_start:rel_stop]
orig_in_data[:, rel_start:rel_stop] = \
np.dot(S_decomp[:, :n_use_in], mm_in)
rel_resid_data -= np.dot(np.dot(S_decomp[:, n_use_in:],
pS_decomp[n_use_in:]),
rel_resid_data)
rel_resid_data -= orig_in_data[:, rel_start:rel_stop]
# If doing tSSS at the end
if st_when == 'after':
_do_tSSS(out_meg_data, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid)
elif st_when == 'never' and head_pos[0] is not None:
logger.info(' Used % 2d head position%s for %s'
% (n_positions, _pl(n_positions), t_str))
raw_sss._data[meg_picks, start:stop] = out_meg_data
raw_sss._data[pos_picks, start:stop] = out_pos_data
return raw_sss
def _get_coil_scale(meg_picks, mag_picks, grad_picks, mag_scale, info):
"""Get the magnetometer scale factor."""
if isinstance(mag_scale, str):
if mag_scale != 'auto':
raise ValueError('mag_scale must be a float or "auto", got "%s"'
% mag_scale)
if len(mag_picks) in (0, len(meg_picks)):
mag_scale = 100. # only one coil type, doesn't matter
logger.info(' Setting mag_scale=%0.2f because only one '
'coil type is present' % mag_scale)
else:
# Find our physical distance between gradiometer pickup loops
# ("base line")
coils = _create_meg_coils([info['chs'][pick]
for pick in meg_picks], 'accurate')
grad_base = {coils[pick]['base'] for pick in grad_picks}
if len(grad_base) != 1 or list(grad_base)[0] <= 0:
raise RuntimeError('Could not automatically determine '
'mag_scale, could not find one '
'proper gradiometer distance from: %s'
% list(grad_base))
grad_base = list(grad_base)[0]
mag_scale = 1. / grad_base
logger.info(' Setting mag_scale=%0.2f based on gradiometer '
'distance %0.2f mm' % (mag_scale, 1000 * grad_base))
mag_scale = float(mag_scale)
coil_scale = np.ones((len(meg_picks), 1))
coil_scale[mag_picks] = mag_scale
return coil_scale, mag_scale
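# A small numeric sketch (assumed baseline, not from a real system) of the
# mag_scale='auto' rule above: the magnetometer scale factor is the
# reciprocal of the gradiometer baseline in meters, so a hypothetical
# 16.8 mm baseline gives a scale factor close to 60.
def _sketch_auto_mag_scale(grad_base=0.0168):
    return 1. / grad_base  # ~59.5 for the assumed 16.8 mm baseline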
def _remove_meg_projs(inst):
"""Remove inplace existing MEG projectors (assumes inactive)."""
meg_picks = pick_types(inst.info, meg=True, exclude=[])
meg_channels = [inst.ch_names[pi] for pi in meg_picks]
non_meg_proj = list()
for proj in inst.info['projs']:
if not any(c in meg_channels for c in proj['data']['col_names']):
non_meg_proj.append(proj)
inst.add_proj(non_meg_proj, remove_existing=True, verbose=False)
def _check_destination(destination, info, head_frame):
"""Triage our reconstruction trans."""
if destination is None:
return info['dev_head_t']
if not head_frame:
raise RuntimeError('destination can only be set if using the '
'head coordinate frame')
if isinstance(destination, str):
recon_trans = _get_trans(destination, 'meg', 'head')[0]
elif isinstance(destination, Transform):
recon_trans = destination
else:
destination = np.array(destination, float)
if destination.shape != (3,):
raise ValueError('destination must be a 3-element vector, '
'str, or None')
recon_trans = np.eye(4)
recon_trans[:3, 3] = destination
recon_trans = Transform('meg', 'head', recon_trans)
if recon_trans.to_str != 'head' or recon_trans.from_str != 'MEG device':
raise RuntimeError('Destination transform is not MEG device -> head, '
'got %s -> %s' % (recon_trans.from_str,
recon_trans.to_str))
return recon_trans
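# A minimal sketch (hypothetical values) of the three ``destination`` forms
# triaged above: None (keep the recorded head position), a 3-element
# translation-only destination, or a full MEG device -> head Transform.
def _sketch_destination_forms():
    dest_none = None                       # use info['dev_head_t'] as-is
    dest_point = np.array([0., 0., 0.04])  # translate 4 cm along z, no rotation
    trans = np.eye(4)
    trans[:3, 3] = dest_point
    dest_trans = Transform('meg', 'head', trans)
    return dest_none, dest_point, dest_trans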
@verbose
def _prep_mf_coils(info, ignore_ref=True, verbose=None):
"""Get all coil integration information loaded and sorted."""
coils, comp_coils = _prep_meg_channels(
info, accurate=True, head_frame=False,
ignore_ref=ignore_ref, do_picking=False, verbose=False)[:2]
mag_mask = _get_mag_mask(coils)
if len(comp_coils) > 0:
meg_picks = pick_types(info, meg=True, ref_meg=False, exclude=[])
ref_picks = pick_types(info, meg=False, ref_meg=True, exclude=[])
inserts = np.searchsorted(meg_picks, ref_picks)
# len(inserts) == len(comp_coils)
for idx, comp_coil in zip(inserts[::-1], comp_coils[::-1]):
coils.insert(idx, comp_coil)
# Now we have:
# [c['chname'] for c in coils] ==
# [info['ch_names'][ii]
# for ii in pick_types(info, meg=True, ref_meg=True)]
# Now coils is a sorted list of coils. Time to do some vectorization.
n_coils = len(coils)
rmags = np.concatenate([coil['rmag'] for coil in coils])
cosmags = np.concatenate([coil['cosmag'] for coil in coils])
ws = np.concatenate([coil['w'] for coil in coils])
cosmags *= ws[:, np.newaxis]
del ws
n_int = np.array([len(coil['rmag']) for coil in coils])
bins = np.repeat(np.arange(len(n_int)), n_int)
bd = np.concatenate(([0], np.cumsum(n_int)))
slice_map = {ii: slice(start, stop)
for ii, (start, stop) in enumerate(zip(bd[:-1], bd[1:]))}
return rmags, cosmags, bins, n_coils, mag_mask, slice_map
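# A self-contained sketch (made-up integration-point counts) of the
# vectorization bookkeeping returned above: ``bins`` maps every integration
# point to its owning coil and ``slice_map`` gives each coil's contiguous
# slice of points.
def _sketch_coil_binning(n_int=(4, 4, 8)):
    n_int = np.asarray(n_int)
    bins = np.repeat(np.arange(len(n_int)), n_int)
    bd = np.concatenate(([0], np.cumsum(n_int)))
    slice_map = {ii: slice(start, stop)
                 for ii, (start, stop) in enumerate(zip(bd[:-1], bd[1:]))}
    return bins, slice_map  # e.g. coil 2 owns points 8..15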
def _trans_starts_stops_quats(pos, start, stop, this_pos_data):
"""Get all trans and limits we need."""
pos_idx = np.arange(*np.searchsorted(pos[1], [start, stop]))
used = np.zeros(stop - start, bool)
trans = list()
rel_starts = list()
rel_stops = list()
quats = list()
weights = list()
for ti in range(-1, len(pos_idx)):
# first iteration for this block of data
if ti < 0:
rel_start = 0
rel_stop = pos[1][pos_idx[0]] if len(pos_idx) > 0 else stop
rel_stop = rel_stop - start
if rel_start == rel_stop:
continue # our first pos occurs on first time sample
# Don't calculate S_decomp here, use the last one
trans.append(None) # meaning: use previous
quats.append(this_pos_data)
else:
rel_start = pos[1][pos_idx[ti]] - start
if ti == len(pos_idx) - 1:
rel_stop = stop - start
else:
rel_stop = pos[1][pos_idx[ti + 1]] - start
trans.append(pos[0][pos_idx[ti]])
quats.append(pos[2][pos_idx[ti]])
assert 0 <= rel_start
assert rel_start < rel_stop
assert rel_stop <= stop - start
assert not used[rel_start:rel_stop].any()
used[rel_start:rel_stop] = True
rel_starts.append(rel_start)
rel_stops.append(rel_stop)
weights.append(rel_stop - rel_start)
assert used.all()
# Use weighted average for average trans over the window
if this_pos_data is None:
avg_trans = None
else:
weights = np.array(weights)
quats = np.array(quats)
weights = weights / weights.sum().astype(float) # int -> float
avg_quat = _average_quats(quats[:, :3], weights)
avg_t = np.dot(weights, quats[:, 3:6])
avg_trans = np.vstack([
np.hstack([quat_to_rot(avg_quat), avg_t[:, np.newaxis]]),
[[0., 0., 0., 1.]]])
return trans, rel_starts, rel_stops, quats, avg_trans
def _do_tSSS(clean_data, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid):
"""Compute and apply SSP-like projection vectors based on min corr."""
if not tsss_valid:
t_proj = np.empty((clean_data.shape[1], 0))
else:
np.asarray_chkfinite(resid)
t_proj = _overlap_projector(orig_in_data, resid, st_correlation)
# Apply projector according to Eq. 12 in :footcite:`TauluSimola2006`
msg = (' Projecting %2d intersecting tSSS component%s '
'for %s' % (t_proj.shape[1], _pl(t_proj.shape[1], ' '), t_str))
if n_positions > 1:
msg += ' (across %2d position%s)' % (n_positions,
_pl(n_positions, ' '))
logger.info(msg)
clean_data -= np.dot(np.dot(clean_data, t_proj), t_proj.T)
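# A pure-numpy sketch (random data, not a real recording) of the projection
# applied above: temporal components spanned by the orthonormal columns of
# ``t_proj`` are removed from every channel's time course.
def _sketch_temporal_projection(n_ch=5, n_times=200, n_proj=2, seed=0):
    rng = np.random.RandomState(seed)
    data = rng.randn(n_ch, n_times)
    t_proj, _ = np.linalg.qr(rng.randn(n_times, n_proj))  # orthonormal columns
    clean = data - np.dot(np.dot(data, t_proj), t_proj.T)
    # the cleaned data no longer contains the projected-out time courses
    assert np.allclose(np.dot(clean, t_proj), 0.)
    return clean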
def _copy_preload_add_channels(raw, add_channels, copy, info):
"""Load data for processing and (maybe) add cHPI pos channels."""
if copy:
raw = raw.copy()
raw.info['chs'] = info['chs'] # updated coil types
if add_channels:
kinds = [FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2, FIFF.FIFFV_QUAT_3,
FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5, FIFF.FIFFV_QUAT_6,
FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR, FIFF.FIFFV_HPI_MOV]
out_shape = (len(raw.ch_names) + len(kinds), len(raw.times))
out_data = np.zeros(out_shape, np.float64)
msg = ' Appending head position result channels and '
if raw.preload:
logger.info(msg + 'copying original raw data')
out_data[:len(raw.ch_names)] = raw._data
raw._data = out_data
else:
logger.info(msg + 'loading raw data from disk')
with use_log_level(False):
raw._preload_data(out_data[:len(raw.ch_names)])
raw._data = out_data
assert raw.preload is True
off = len(raw.ch_names)
chpi_chs = [
dict(ch_name='CHPI%03d' % (ii + 1), logno=ii + 1,
scanno=off + ii + 1, unit_mul=-1, range=1., unit=-1,
kind=kinds[ii], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
cal=1e-4, coil_type=FWD.COIL_UNKNOWN, loc=np.zeros(12))
for ii in range(len(kinds))]
raw.info['chs'].extend(chpi_chs)
raw.info._update_redundant()
raw.info._check_consistency()
assert raw._data.shape == (raw.info['nchan'], len(raw.times))
# Return the pos picks
pos_picks = np.arange(len(raw.ch_names) - len(chpi_chs),
len(raw.ch_names))
return raw, pos_picks
else:
if copy:
if not raw.preload:
logger.info(' Loading raw data from disk')
raw.load_data(verbose=False)
else:
logger.info(' Using loaded raw data')
return raw, np.array([], int)
def _check_pos(pos, head_frame, raw, st_fixed, sfreq):
"""Check for a valid pos array and transform it to a more usable form."""
_validate_type(pos, (np.ndarray, None), 'head_pos')
if pos is None:
return [None, np.array([-1])]
if not head_frame:
raise ValueError('positions can only be used if coord_frame="head"')
if not st_fixed:
warn('st_fixed=False is untested, use with caution!')
if not isinstance(pos, np.ndarray):
raise TypeError('pos must be an ndarray')
if pos.ndim != 2 or pos.shape[1] != 10:
raise ValueError('pos must be an array of shape (N, 10)')
t = pos[:, 0]
if not np.array_equal(t, np.unique(t)):
        raise ValueError('Time points must be unique and in ascending order')
# We need an extra 1e-3 (1 ms) here because MaxFilter outputs values
# only out to 3 decimal places
if not _time_mask(t, tmin=raw._first_time - 1e-3, tmax=None,
sfreq=sfreq).all():
raise ValueError('Head position time points must be greater than '
'first sample offset, but found %0.4f < %0.4f'
% (t[0], raw._first_time))
max_dist = np.sqrt(np.sum(pos[:, 4:7] ** 2, axis=1)).max()
if max_dist > 1.:
warn('Found a distance greater than 1 m (%0.3g m) from the device '
'origin, positions may be invalid and Maxwell filtering could '
'fail' % (max_dist,))
dev_head_ts = np.zeros((len(t), 4, 4))
dev_head_ts[:, 3, 3] = 1.
dev_head_ts[:, :3, 3] = pos[:, 4:7]
dev_head_ts[:, :3, :3] = quat_to_rot(pos[:, 1:4])
pos = [dev_head_ts, t - raw._first_time, pos[:, 1:]]
return pos
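# A minimal sketch (fabricated numbers) of the ``head_pos`` layout validated
# above: shape (N, 10), time in column 0, rotation quaternion components in
# columns 1-3 and device-to-head translation (in meters) in columns 4-6; the
# remaining fit-quality columns are simply zero-filled here.
def _sketch_head_pos_array():
    head_pos = np.zeros((2, 10))
    head_pos[:, 0] = [1.0, 2.0]           # times (s), unique and ascending
    # columns 1:4 (quaternion) left at zero -> identity rotation
    head_pos[:, 4:7] = [[0., 0., 0.045],  # translations well under the
                        [0., 0., 0.046]]  # 1 m sanity limit checked above
    return head_pos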
def _get_decomp(trans, all_coils, cal, regularize, exp, ignore_ref,
coil_scale, grad_picks, mag_picks, good_mask, mag_or_fine,
bad_condition, t, mag_scale):
"""Get a decomposition matrix and pseudoinverse matrices."""
from scipy import linalg
#
# Fine calibration processing (point-like magnetometers and calib. coeffs)
#
S_decomp_full = _get_s_decomp(
exp, all_coils, trans, coil_scale, cal, ignore_ref, grad_picks,
mag_picks, mag_scale)
S_decomp = S_decomp_full[good_mask]
#
# Extended SSS basis (eSSS)
#
extended_proj = exp.get('extended_proj', ())
if len(extended_proj) > 0:
rcond = 1e-4
thresh = 1e-4
extended_proj = extended_proj.T * coil_scale[good_mask]
extended_proj /= np.linalg.norm(extended_proj, axis=0)
n_int = _get_n_moments(exp['int_order'])
if S_decomp.shape[1] > n_int:
S_ext = S_decomp[:, n_int:].copy()
S_ext /= np.linalg.norm(S_ext, axis=0)
S_ext_orth = linalg.orth(S_ext, rcond=rcond)
assert S_ext_orth.shape[1] == S_ext.shape[1]
extended_proj -= np.dot(S_ext_orth,
np.dot(S_ext_orth.T, extended_proj))
scale = np.mean(np.linalg.norm(S_decomp[n_int:], axis=0))
else:
scale = np.mean(np.linalg.norm(S_decomp[:n_int], axis=0))
mask = np.linalg.norm(extended_proj, axis=0) > thresh
extended_remove = list(np.where(~mask)[0] + S_decomp.shape[1])
logger.debug(' Reducing %d -> %d'
% (extended_proj.shape[1], mask.sum()))
extended_proj /= np.linalg.norm(extended_proj, axis=0) / scale
S_decomp = np.concatenate([S_decomp, extended_proj], axis=-1)
if extended_proj.shape[1]:
S_decomp_full = np.pad(
S_decomp_full, ((0, 0), (0, extended_proj.shape[1])),
'constant')
S_decomp_full[good_mask, -extended_proj.shape[1]:] = extended_proj
else:
extended_remove = list()
del extended_proj
#
# Regularization
#
S_decomp, reg_moments, n_use_in = _regularize(
regularize, exp, S_decomp, mag_or_fine, extended_remove, t=t)
S_decomp_full = S_decomp_full.take(reg_moments, axis=1)
#
# Pseudo-inverse of total multipolar moment basis set (Part of Eq. 37)
#
pS_decomp, sing = _col_norm_pinv(S_decomp.copy())
cond = sing[0] / sing[-1]
if bad_condition != 'ignore' and cond >= 1000.:
msg = 'Matrix is badly conditioned: %0.0f >= 1000' % cond
if bad_condition == 'error':
raise RuntimeError(msg)
elif bad_condition == 'warning':
warn(msg)
else: # condition == 'info'
logger.info(msg)
# Build in our data scaling here
pS_decomp *= coil_scale[good_mask].T
S_decomp /= coil_scale[good_mask]
S_decomp_full /= coil_scale
return S_decomp, S_decomp_full, pS_decomp, reg_moments, n_use_in
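# A tiny numpy sketch (random matrix, threshold taken from the check above)
# of the conditioning test: the ratio of largest to smallest singular value
# of the decomposition matrix must stay below 1000.
def _sketch_condition_check(seed=0):
    rng = np.random.RandomState(seed)
    S = rng.randn(30, 10)
    sing = np.linalg.svd(S, compute_uv=False)
    cond = sing[0] / sing[-1]
    return cond, bool(cond < 1000.)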
def _get_s_decomp(exp, all_coils, trans, coil_scale, cal, ignore_ref,
grad_picks, mag_picks, mag_scale):
"""Get S_decomp."""
S_decomp = _trans_sss_basis(exp, all_coils, trans, coil_scale)
if cal is not None:
# Compute point-like mags to incorporate gradiometer imbalance
grad_cals = _sss_basis_point(exp, trans, cal, ignore_ref, mag_scale)
# Add point like magnetometer data to bases.
if len(grad_picks) > 0:
S_decomp[grad_picks, :] += grad_cals
# Scale magnetometers by calibration coefficient
if len(mag_picks) > 0:
S_decomp[mag_picks, :] /= cal['mag_cals']
# We need to be careful about KIT gradiometers
return S_decomp
@verbose
def _regularize(regularize, exp, S_decomp, mag_or_fine, extended_remove, t,
verbose=None):
"""Regularize a decomposition matrix."""
# ALWAYS regularize the out components according to norm, since
# gradiometer-only setups (e.g., KIT) can have zero first-order
# (homogeneous field) components
int_order, ext_order = exp['int_order'], exp['ext_order']
n_in = _get_n_moments(int_order)
n_out = S_decomp.shape[1] - n_in
t_str = '%8.3f' % t
if regularize is not None: # regularize='in'
in_removes, out_removes = _regularize_in(
int_order, ext_order, S_decomp, mag_or_fine, extended_remove)
else:
in_removes = []
out_removes = _regularize_out(int_order, ext_order, mag_or_fine,
extended_remove)
reg_in_moments = np.setdiff1d(np.arange(n_in), in_removes)
reg_out_moments = np.setdiff1d(np.arange(n_in, S_decomp.shape[1]),
out_removes)
n_use_in = len(reg_in_moments)
n_use_out = len(reg_out_moments)
reg_moments = np.concatenate((reg_in_moments, reg_out_moments))
S_decomp = S_decomp.take(reg_moments, axis=1)
if regularize is not None or n_use_out != n_out:
logger.info(' Using %s/%s harmonic components for %s '
'(%s/%s in, %s/%s out)'
% (n_use_in + n_use_out, n_in + n_out, t_str,
n_use_in, n_in, n_use_out, n_out))
return S_decomp, reg_moments, n_use_in
@verbose
def _get_mf_picks_fix_mags(info, int_order, ext_order, ignore_ref=False,
verbose=None):
"""Pick types for Maxwell filtering and fix magnetometers."""
# Check for T1/T2 mag types
mag_inds_T1T2 = _get_T1T2_mag_inds(info, use_cal=True)
if len(mag_inds_T1T2) > 0:
fix_mag_coil_types(info, use_cal=True)
# Get indices of channels to use in multipolar moment calculation
ref = not ignore_ref
meg_picks = pick_types(info, meg=True, ref_meg=ref, exclude=[])
meg_info = pick_info(_simplify_info(info), meg_picks)
del info
good_mask = np.zeros(len(meg_picks,), bool)
good_mask[pick_types(meg_info, meg=True, ref_meg=ref, exclude='bads')] = 1
n_bases = _get_n_moments([int_order, ext_order]).sum()
if n_bases > good_mask.sum():
raise ValueError('Number of requested bases (%s) exceeds number of '
'good sensors (%s)' % (str(n_bases), good_mask.sum()))
recons = [ch for ch in meg_info['bads']]
if len(recons) > 0:
msg = ' Bad MEG channels being reconstructed: %s' % recons
else:
msg = ' No bad MEG channels'
logger.info(msg)
ref_meg = False if ignore_ref else 'mag'
mag_picks = pick_types(meg_info, meg='mag', ref_meg=ref_meg, exclude=[])
ref_meg = False if ignore_ref else 'grad'
grad_picks = pick_types(meg_info, meg='grad', ref_meg=ref_meg, exclude=[])
assert len(mag_picks) + len(grad_picks) == len(meg_info['ch_names'])
# Determine which are magnetometers for external basis purposes
mag_or_fine = np.zeros(len(meg_picks), bool)
mag_or_fine[mag_picks] = True
# KIT gradiometers are marked as having units T, not T/M (argh)
# We need a separate variable for this because KIT grads should be
# treated mostly like magnetometers (e.g., scaled by 100) for reg
coil_types = np.array([ch['coil_type'] for ch in meg_info['chs']])
mag_or_fine[(coil_types & 0xFFFF) == FIFF.FIFFV_COIL_KIT_GRAD] = False
# The same thing goes for CTF gradiometers...
ctf_grads = [FIFF.FIFFV_COIL_CTF_GRAD,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD]
mag_or_fine[np.in1d(coil_types, ctf_grads)] = False
msg = (' Processing %s gradiometers and %s magnetometers'
% (len(grad_picks), len(mag_picks)))
n_kit = len(mag_picks) - mag_or_fine.sum()
if n_kit > 0:
msg += ' (of which %s are actually KIT gradiometers)' % n_kit
logger.info(msg)
return meg_picks, mag_picks, grad_picks, good_mask, mag_or_fine
def _check_regularize(regularize):
"""Ensure regularize is valid."""
if not (regularize is None or (isinstance(regularize, str) and
regularize in ('in',))):
raise ValueError('regularize must be None or "in"')
def _check_usable(inst):
"""Ensure our data are clean."""
if inst.proj:
raise RuntimeError('Projectors cannot be applied to data during '
'Maxwell filtering.')
current_comp = inst.compensation_grade
if current_comp not in (0, None):
raise RuntimeError('Maxwell filter cannot be done on compensated '
'channels, but data have been compensated with '
'grade %s.' % current_comp)
def _col_norm_pinv(x):
"""Compute the pinv with column-normalization to stabilize calculation.
Note: will modify/overwrite x.
"""
norm = np.sqrt(np.sum(x * x, axis=0))
x /= norm
u, s, v = _safe_svd(x, full_matrices=False, **check_disable)
v /= norm
return np.dot(v.T * 1. / s, u.T), s
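# A self-contained sketch (random matrix with wildly different column scales)
# checking that the column-normalized SVD route above reproduces the ordinary
# pseudoinverse; the normalization only improves numerical behaviour.
def _sketch_col_norm_pinv_equivalence(seed=0):
    rng = np.random.RandomState(seed)
    x = rng.randn(20, 5) * np.array([1., 10., 100., 1e3, 1e4])
    pinv_a, _ = _col_norm_pinv(x.copy())  # copy: _col_norm_pinv overwrites x
    pinv_b = np.linalg.pinv(x)
    assert np.allclose(pinv_a, pinv_b)
    return pinv_a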
def _sq(x):
"""Square quickly."""
return x * x
def _check_finite(data):
"""Ensure data is finite."""
if not np.isfinite(data).all():
raise RuntimeError('data contains non-finite numbers')
def _sph_harm_norm(order, degree):
"""Compute normalization factor for spherical harmonics."""
# we could use scipy.special.poch(degree + order + 1, -2 * order)
# here, but it's slower for our fairly small degree
norm = np.sqrt((2 * degree + 1.) / (4 * np.pi))
if order != 0:
norm *= np.sqrt(factorial(degree - order) /
float(factorial(degree + order)))
return norm
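# A short sketch (arbitrary order/degree) of the alternative mentioned in the
# comment above: the factorial ratio in the normalization equals
# scipy.special.poch(degree + order + 1, -2 * order).
def _sketch_sph_harm_norm_poch(order=2, degree=4):
    from scipy.special import poch
    ratio_fact = factorial(degree - order) / float(factorial(degree + order))
    ratio_poch = poch(degree + order + 1, -2 * order)
    assert np.isclose(ratio_fact, ratio_poch)
    return _sph_harm_norm(order, degree)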
def _concatenate_sph_coils(coils):
"""Concatenate MEG coil parameters for spherical harmoncs."""
rs = np.concatenate([coil['r0_exey'] for coil in coils])
wcoils = np.concatenate([coil['w'] for coil in coils])
ezs = np.concatenate([np.tile(coil['ez'][np.newaxis, :],
(len(coil['rmag']), 1))
for coil in coils])
bins = np.repeat(np.arange(len(coils)),
[len(coil['rmag']) for coil in coils])
return rs, wcoils, ezs, bins
_mu_0 = 4e-7 * np.pi # magnetic permeability
def _get_mag_mask(coils):
"""Get the coil_scale for Maxwell filtering."""
return np.array([coil['coil_class'] == FWD.COILC_MAG for coil in coils])
def _sss_basis_basic(exp, coils, mag_scale=100., method='standard'):
"""Compute SSS basis using non-optimized (but more readable) algorithms."""
from scipy.special import sph_harm
int_order, ext_order = exp['int_order'], exp['ext_order']
origin = exp['origin']
assert 'extended_proj' not in exp # advanced option not supported
# Compute vector between origin and coil, convert to spherical coords
if method == 'standard':
# Get position, normal, weights, and number of integration pts.
rmags, cosmags, ws, bins = _concatenate_coils(coils)
rmags -= origin
# Convert points to spherical coordinates
rad, az, pol = _cart_to_sph(rmags).T
cosmags *= ws[:, np.newaxis]
del rmags, ws
out_type = np.float64
else: # testing equivalence method
rs, wcoils, ezs, bins = _concatenate_sph_coils(coils)
rs -= origin
rad, az, pol = _cart_to_sph(rs).T
ezs *= wcoils[:, np.newaxis]
del rs, wcoils
out_type = np.complex128
del origin
# Set up output matrices
n_in, n_out = _get_n_moments([int_order, ext_order])
S_tot = np.empty((len(coils), n_in + n_out), out_type)
S_in = S_tot[:, :n_in]
S_out = S_tot[:, n_in:]
coil_scale = np.ones((len(coils), 1))
coil_scale[_get_mag_mask(coils)] = mag_scale
# Compute internal/external basis vectors (exclude degree 0; L/RHS Eq. 5)
for degree in range(1, max(int_order, ext_order) + 1):
# Only loop over positive orders, negative orders are handled
# for efficiency within
for order in range(degree + 1):
S_in_out = list()
grads_in_out = list()
# Same spherical harmonic is used for both internal and external
sph = sph_harm(order, degree, az, pol)
sph_norm = _sph_harm_norm(order, degree)
# Compute complex gradient for all integration points
# in spherical coordinates (Eq. 6). The gradient for rad, az, pol
# is obtained by taking the partial derivative of Eq. 4 w.r.t. each
# coordinate.
az_factor = 1j * order * sph / np.sin(np.maximum(pol, 1e-16))
pol_factor = (-sph_norm * np.sin(pol) * np.exp(1j * order * az) *
_alegendre_deriv(order, degree, np.cos(pol)))
if degree <= int_order:
S_in_out.append(S_in)
in_norm = _mu_0 * rad ** -(degree + 2)
g_rad = in_norm * (-(degree + 1.) * sph)
g_az = in_norm * az_factor
g_pol = in_norm * pol_factor
grads_in_out.append(_sph_to_cart_partials(az, pol,
g_rad, g_az, g_pol))
if degree <= ext_order:
S_in_out.append(S_out)
out_norm = _mu_0 * rad ** (degree - 1)
g_rad = out_norm * degree * sph
g_az = out_norm * az_factor
g_pol = out_norm * pol_factor
grads_in_out.append(_sph_to_cart_partials(az, pol,
g_rad, g_az, g_pol))
for spc, grads in zip(S_in_out, grads_in_out):
# We could convert to real at the end, but it's more efficient
# to do it now
if method == 'standard':
grads_pos_neg = [_sh_complex_to_real(grads, order)]
orders_pos_neg = [order]
# Deal with the negative orders
if order > 0:
# it's faster to use the conjugation property for
# our normalized spherical harmonics than recalculate
grads_pos_neg.append(_sh_complex_to_real(
_sh_negate(grads, order), -order))
orders_pos_neg.append(-order)
for gr, oo in zip(grads_pos_neg, orders_pos_neg):
# Gradients dotted w/integration point weighted normals
gr = np.einsum('ij,ij->i', gr, cosmags)
vals = np.bincount(bins, gr, len(coils))
spc[:, _deg_ord_idx(degree, oo)] = -vals
else:
grads = np.einsum('ij,ij->i', grads, ezs)
v = (np.bincount(bins, grads.real, len(coils)) +
1j * np.bincount(bins, grads.imag, len(coils)))
spc[:, _deg_ord_idx(degree, order)] = -v
if order > 0:
spc[:, _deg_ord_idx(degree, -order)] = \
-_sh_negate(v, order)
# Scale magnetometers
S_tot *= coil_scale
if method != 'standard':
# Eventually we could probably refactor this for 2x mem (and maybe CPU)
# savings by changing how spc/S_tot is assigned above (real only)
S_tot = _bases_complex_to_real(S_tot, int_order, ext_order)
return S_tot
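# A pure-numpy sketch (single made-up point) of the cartesian-to-spherical
# convention relied on above: radius, azimuth (angle in the x-y plane) and
# polar angle (measured from the +z axis).
def _sketch_cart_to_sph(point=(1., 1., 1.)):
    x, y, z = point
    rad = np.sqrt(x * x + y * y + z * z)
    az = np.arctan2(y, x)
    pol = np.arccos(z / rad)
    return rad, az, pol  # for (1, 1, 1): (sqrt(3), pi/4, ~0.9553)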
def _sss_basis(exp, all_coils):
"""Compute SSS basis for given conditions.
Parameters
----------
exp : dict
Must contain the following keys:
origin : ndarray, shape (3,)
Origin of the multipolar moment space in meters
int_order : int
Order of the internal multipolar moment space
ext_order : int
Order of the external multipolar moment space
coils : list
List of MEG coils. Each should contain coil information dict specifying
position, normals, weights, number of integration points and channel
type. All coil geometry must be in the same coordinate frame
as ``origin`` (``head`` or ``meg``).
Returns
-------
bases : ndarray, shape (n_coils, n_mult_moments)
Internal and external basis sets as a single ndarray.
Notes
-----
Does not incorporate magnetometer scaling factor or normalize spaces.
Adapted from code provided by Jukka Nenonen.
"""
rmags, cosmags, bins, n_coils = all_coils[:4]
int_order, ext_order = exp['int_order'], exp['ext_order']
n_in, n_out = _get_n_moments([int_order, ext_order])
rmags = rmags - exp['origin']
# do the heavy lifting
max_order = max(int_order, ext_order)
L = _tabular_legendre(rmags, max_order)
phi = np.arctan2(rmags[:, 1], rmags[:, 0])
r_n = np.sqrt(np.sum(rmags * rmags, axis=1))
r_xy = np.sqrt(rmags[:, 0] * rmags[:, 0] + rmags[:, 1] * rmags[:, 1])
cos_pol = rmags[:, 2] / r_n # cos(theta); theta 0...pi
sin_pol = np.sqrt(1. - cos_pol * cos_pol) # sin(theta)
z_only = (r_xy <= 1e-16)
sin_pol_nz = sin_pol.copy()
sin_pol_nz[z_only] = 1. # will be overwritten later
r_xy[z_only] = 1.
cos_az = rmags[:, 0] / r_xy # cos(phi)
cos_az[z_only] = 1.
sin_az = rmags[:, 1] / r_xy # sin(phi)
sin_az[z_only] = 0.
# Appropriate vector spherical harmonics terms
# JNE 2012-02-08: modified alm -> 2*alm, blm -> -2*blm
r_nn2 = r_n.copy()
r_nn1 = 1.0 / (r_n * r_n)
S_tot = np.empty((n_coils, n_in + n_out), np.float64)
S_in = S_tot[:, :n_in]
S_out = S_tot[:, n_in:]
for degree in range(max_order + 1):
if degree <= ext_order:
r_nn1 *= r_n # r^(l-1)
if degree <= int_order:
r_nn2 *= r_n # r^(l+2)
# mu_0*sqrt((2l+1)/4pi (l-m)!/(l+m)!)
mult = 2e-7 * np.sqrt((2 * degree + 1) * np.pi)
if degree > 0:
idx = _deg_ord_idx(degree, 0)
# alpha
if degree <= int_order:
b_r = mult * (degree + 1) * L[degree][0] / r_nn2
b_pol = -mult * L[degree][1] / r_nn2
S_in[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, 0., b_pol,
cosmags, bins, n_coils)
# beta
if degree <= ext_order:
b_r = -mult * degree * L[degree][0] * r_nn1
b_pol = -mult * L[degree][1] * r_nn1
S_out[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, 0., b_pol,
cosmags, bins, n_coils)
for order in range(1, degree + 1):
ord_phi = order * phi
sin_order = np.sin(ord_phi)
cos_order = np.cos(ord_phi)
mult /= np.sqrt((degree - order + 1) * (degree + order))
factor = mult * np.sqrt(2) # equivalence fix (MF uses 2.)
# Real
idx = _deg_ord_idx(degree, order)
r_fact = factor * L[degree][order] * cos_order
az_fact = factor * order * sin_order * L[degree][order]
pol_fact = -factor * (L[degree][order + 1] -
(degree + order) * (degree - order + 1) *
L[degree][order - 1]) * cos_order
# alpha
if degree <= int_order:
b_r = (degree + 1) * r_fact / r_nn2
b_az = az_fact / (sin_pol_nz * r_nn2)
b_az[z_only] = 0.
b_pol = pol_fact / (2 * r_nn2)
S_in[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
# beta
if degree <= ext_order:
b_r = -degree * r_fact * r_nn1
b_az = az_fact * r_nn1 / sin_pol_nz
b_az[z_only] = 0.
b_pol = pol_fact * r_nn1 / 2.
S_out[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
# Imaginary
idx = _deg_ord_idx(degree, -order)
r_fact = factor * L[degree][order] * sin_order
az_fact = factor * order * cos_order * L[degree][order]
pol_fact = factor * (L[degree][order + 1] -
(degree + order) * (degree - order + 1) *
L[degree][order - 1]) * sin_order
# alpha
if degree <= int_order:
b_r = -(degree + 1) * r_fact / r_nn2
b_az = az_fact / (sin_pol_nz * r_nn2)
b_az[z_only] = 0.
b_pol = pol_fact / (2 * r_nn2)
S_in[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
# beta
if degree <= ext_order:
b_r = degree * r_fact * r_nn1
b_az = az_fact * r_nn1 / sin_pol_nz
b_az[z_only] = 0.
b_pol = pol_fact * r_nn1 / 2.
S_out[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
return S_tot
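# A minimal usage sketch for _sss_basis (hedged: ``all_coils`` is assumed to
# come from _prep_mf_coils elsewhere in this module, and the 4 cm z-offset
# origin is purely illustrative):
#
#     exp = dict(origin=np.array([0., 0., 0.04]), int_order=8, ext_order=3)
#     S = _sss_basis(exp, all_coils)  # shape (n_coils, n_in + n_out)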
def _integrate_points(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils):
"""Integrate points in spherical coords."""
grads = _sp_to_cart(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol).T
grads = (grads * cosmags).sum(axis=1)
return bincount(bins, grads, n_coils)
def _tabular_legendre(r, nind):
"""Compute associated Legendre polynomials."""
r_n = np.sqrt(np.sum(r * r, axis=1))
x = r[:, 2] / r_n # cos(theta)
L = list()
for degree in range(nind + 1):
L.append(np.zeros((degree + 2, len(r))))
L[0][0] = 1.
pnn = np.ones(x.shape)
fact = 1.
sx2 = np.sqrt((1. - x) * (1. + x))
for degree in range(nind + 1):
L[degree][degree] = pnn
pnn *= (-fact * sx2)
fact += 2.
if degree < nind:
L[degree + 1][degree] = x * (2 * degree + 1) * L[degree][degree]
if degree >= 2:
for order in range(degree - 1):
L[degree][order] = (x * (2 * degree - 1) *
L[degree - 1][order] -
(degree + order - 1) *
L[degree - 2][order]) / (degree - order)
return L
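# Sanity-check sketch (assumes scipy is available; the table uses the same
# Condon-Shortley phase convention as scipy.special.lpmv):
#
#     from scipy.special import lpmv
#     L = _tabular_legendre(r, 3)
#     x = r[:, 2] / np.sqrt(np.sum(r * r, axis=1))
#     # L[2][1] should closely match lpmv(1, 2, x)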
def _sp_to_cart(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol):
"""Convert spherical coords to cartesian."""
out = np.empty((3,) + sin_pol.shape)
out[0] = sin_pol * cos_az * b_r + cos_pol * cos_az * b_pol - sin_az * b_az
out[1] = sin_pol * sin_az * b_r + cos_pol * sin_az * b_pol + cos_az * b_az
out[2] = cos_pol * b_r - sin_pol * b_pol
return out
def _get_degrees_orders(order):
"""Get the set of degrees used in our basis functions."""
degrees = np.zeros(_get_n_moments(order), int)
orders = np.zeros_like(degrees)
for degree in range(1, order + 1):
# Only loop over positive orders, negative orders are handled
# for efficiency within
for order in range(degree + 1):
ii = _deg_ord_idx(degree, order)
degrees[ii] = degree
orders[ii] = order
ii = _deg_ord_idx(degree, -order)
degrees[ii] = degree
orders[ii] = -order
return degrees, orders
def _alegendre_deriv(order, degree, val):
"""Compute the derivative of the associated Legendre polynomial at a value.
Parameters
----------
order : int
Order of spherical harmonic. (Usually) corresponds to 'm'.
degree : int
Degree of spherical harmonic. (Usually) corresponds to 'l'.
val : float
Value to evaluate the derivative at.
Returns
-------
dPlm : float
Associated Legendre function derivative
"""
from scipy.special import lpmv
assert order >= 0
return (order * val * lpmv(order, degree, val) + (degree + order) *
(degree - order + 1.) * np.sqrt(1. - val * val) *
lpmv(order - 1, degree, val)) / (1. - val * val)
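# Quick numerical check sketch (illustrative values; assumes scipy is
# available): the returned derivative should agree with a central finite
# difference of lpmv, e.g.
#
#     from scipy.special import lpmv
#     x, eps = 0.3, 1e-6
#     approx = (lpmv(2, 4, x + eps) - lpmv(2, 4, x - eps)) / (2 * eps)
#     # approx should be close to _alegendre_deriv(2, 4, 0.3)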
def _bases_complex_to_real(complex_tot, int_order, ext_order):
"""Convert complex spherical harmonics to real."""
n_in, n_out = _get_n_moments([int_order, ext_order])
complex_in = complex_tot[:, :n_in]
complex_out = complex_tot[:, n_in:]
real_tot = np.empty(complex_tot.shape, np.float64)
real_in = real_tot[:, :n_in]
real_out = real_tot[:, n_in:]
for comp, real, exp_order in zip([complex_in, complex_out],
[real_in, real_out],
[int_order, ext_order]):
for deg in range(1, exp_order + 1):
for order in range(deg + 1):
idx_pos = _deg_ord_idx(deg, order)
idx_neg = _deg_ord_idx(deg, -order)
real[:, idx_pos] = _sh_complex_to_real(comp[:, idx_pos], order)
if order != 0:
# This extra mult factor baffles me a bit, but it works
# in round-trip testing, so we'll keep it :(
mult = (-1 if order % 2 == 0 else 1)
real[:, idx_neg] = mult * _sh_complex_to_real(
comp[:, idx_neg], -order)
return real_tot
def _bases_real_to_complex(real_tot, int_order, ext_order):
"""Convert real spherical harmonics to complex."""
n_in, n_out = _get_n_moments([int_order, ext_order])
real_in = real_tot[:, :n_in]
real_out = real_tot[:, n_in:]
comp_tot = np.empty(real_tot.shape, np.complex128)
comp_in = comp_tot[:, :n_in]
comp_out = comp_tot[:, n_in:]
for real, comp, exp_order in zip([real_in, real_out],
[comp_in, comp_out],
[int_order, ext_order]):
for deg in range(1, exp_order + 1):
# only loop over positive orders, figure out neg from pos
for order in range(deg + 1):
idx_pos = _deg_ord_idx(deg, order)
idx_neg = _deg_ord_idx(deg, -order)
this_comp = _sh_real_to_complex([real[:, idx_pos],
real[:, idx_neg]], order)
comp[:, idx_pos] = this_comp
comp[:, idx_neg] = _sh_negate(this_comp, order)
return comp_tot
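# Round-trip sketch (shapes are illustrative): for a complex basis ``S_c`` of
# shape (n_coils, n_moments) built with the same int/ext orders,
#
#     S_r = _bases_complex_to_real(S_c, int_order, ext_order)
#     S_c2 = _bases_real_to_complex(S_r, int_order, ext_order)
#
# should recover ``S_c`` up to numerical precision.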
def _check_info(info, sss=True, tsss=True, calibration=True, ctc=True):
"""Ensure that Maxwell filtering has not been applied yet."""
for ent in info['proc_history']:
for msg, key, doing in (('SSS', 'sss_info', sss),
('tSSS', 'max_st', tsss),
('fine calibration', 'sss_cal', calibration),
('cross-talk cancellation', 'sss_ctc', ctc)):
if not doing:
continue
if len(ent['max_info'][key]) > 0:
raise RuntimeError('Maxwell filtering %s step has already '
'been applied, cannot reapply' % msg)
def _update_sss_info(raw, origin, int_order, ext_order, nchan, coord_frame,
sss_ctc, sss_cal, max_st, reg_moments, st_only,
recon_trans, extended_proj):
"""Update info inplace after Maxwell filtering.
Parameters
----------
raw : instance of mne.io.Raw
Data to be filtered
origin : array-like, shape (3,)
Origin of internal and external multipolar moment space in head coords
(in meters)
int_order : int
Order of internal component of spherical expansion
ext_order : int
Order of external component of spherical expansion
nchan : int
Number of sensors
sss_ctc : dict
The cross talk information.
sss_cal : dict
The calibration information.
max_st : dict
The tSSS information.
reg_moments : ndarray | slice
The moments that were used.
st_only : bool
Whether tSSS only was performed.
recon_trans : instance of Transformation
The reconstruction trans.
extended_proj : ndarray
Extended external bases.
"""
n_in, n_out = _get_n_moments([int_order, ext_order])
raw.info['maxshield'] = False
components = np.zeros(n_in + n_out + len(extended_proj)).astype('int32')
components[reg_moments] = 1
sss_info_dict = dict(in_order=int_order, out_order=ext_order,
nchan=nchan, origin=origin.astype('float32'),
job=FIFF.FIFFV_SSS_JOB_FILTER,
nfree=np.sum(components[:n_in]),
frame=_str_to_frame[coord_frame],
components=components)
max_info_dict = dict(max_st=max_st)
if st_only:
max_info_dict.update(sss_info=dict(), sss_cal=dict(), sss_ctc=dict())
else:
max_info_dict.update(sss_info=sss_info_dict, sss_cal=sss_cal,
sss_ctc=sss_ctc)
# Reset 'bads' for any MEG channels since they've been reconstructed
_reset_meg_bads(raw.info)
# set the reconstruction transform
raw.info['dev_head_t'] = recon_trans
block_id = _generate_meas_id()
raw.info['proc_history'].insert(0, dict(
max_info=max_info_dict, block_id=block_id, date=DATE_NONE,
creator='mne-python v%s' % __version__, experimenter=''))
def _reset_meg_bads(info):
"""Reset MEG bads."""
meg_picks = pick_types(info, meg=True, exclude=[])
info['bads'] = [bad for bad in info['bads']
if info['ch_names'].index(bad) not in meg_picks]
check_disable = dict(check_finite=False)
def _orth_overwrite(A):
"""Create a slightly more efficient 'orth'."""
# adapted from scipy/linalg/decomp_svd.py
u, s = _safe_svd(A, full_matrices=False, **check_disable)[:2]
M, N = A.shape
eps = np.finfo(float).eps
tol = max(M, N) * np.amax(s) * eps
num = np.sum(s > tol, dtype=int)
return u[:, :num]
def _overlap_projector(data_int, data_res, corr):
"""Calculate projector for removal of subspace intersection in tSSS."""
# corr necessary to deal with noise when finding identical signal
# directions in the subspace. See the end of the Results section in
# :footcite:`TauluSimola2006`
# Note that the procedure here is an updated version of
# :footcite:`TauluSimola2006` (and used in MF's tSSS) that uses residuals
# instead of internal/external spaces directly. This provides more degrees
# of freedom when analyzing for intersections between internal and
# external spaces.
# Normalize data, then compute orth to get temporal bases. Matrices
# must have shape (n_samps x effective_rank) when passed into svd
# computation
# we use np.linalg.norm instead of sp.linalg.norm here: ~2x faster!
from scipy import linalg
n = np.linalg.norm(data_int)
n = 1. if n == 0 else n # all-zero data should gracefully continue
data_int = _orth_overwrite((data_int / n).T)
n = np.linalg.norm(data_res)
n = 1. if n == 0 else n
data_res = _orth_overwrite((data_res / n).T)
if data_int.shape[1] == 0 or data_res.shape[1] == 0:
return np.empty((data_int.shape[0], 0))
Q_int = linalg.qr(data_int,
overwrite_a=True, mode='economic', **check_disable)[0].T
Q_res = linalg.qr(data_res,
overwrite_a=True, mode='economic', **check_disable)[0]
C_mat = np.dot(Q_int, Q_res)
del Q_int
# Compute angles between subspace and which bases to keep
S_intersect, Vh_intersect = _safe_svd(C_mat, full_matrices=False,
**check_disable)[1:]
del C_mat
intersect_mask = (S_intersect >= corr)
del S_intersect
# Compute projection operator as (I-LL_T) Eq. 12 in
# :footcite:`TauluSimola2006` V_principal should be shape
# (n_time_pts x n_retained_inds)
Vh_intersect = Vh_intersect[intersect_mask].T
V_principal = np.dot(Q_res, Vh_intersect)
return V_principal
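# Application sketch (shapes are illustrative): with ``data`` of shape
# (n_channels, n_times) and ``V = _overlap_projector(data_int, data_res, corr)``
# of shape (n_times, n_retained), the intersecting temporal subspace is removed
# roughly as
#
#     clean = data - np.dot(np.dot(data, V), V.T)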
def _prep_fine_cal(info, fine_cal):
from ._fine_cal import read_fine_calibration
_validate_type(fine_cal, (dict, 'path-like'))
if not isinstance(fine_cal, dict):
extra = op.basename(str(fine_cal))
fine_cal = read_fine_calibration(fine_cal)
else:
extra = 'dict'
logger.info(f' Using fine calibration {extra}')
ch_names = _clean_names(info['ch_names'], remove_whitespace=True)
info_to_cal = OrderedDict()
missing = list()
for ci, name in enumerate(fine_cal['ch_names']):
if name not in ch_names:
missing.append(name)
else:
oi = ch_names.index(name)
info_to_cal[oi] = ci
meg_picks = pick_types(info, meg=True, exclude=[])
if len(info_to_cal) != len(meg_picks):
raise RuntimeError(
'Not all MEG channels found in fine calibration file, missing:\n%s'
% sorted(list({ch_names[pick] for pick in meg_picks} -
set(fine_cal['ch_names']))))
if len(missing):
warn('Found cal channel%s not in data: %s' % (_pl(missing), missing))
return info_to_cal, fine_cal, ch_names
def _update_sensor_geometry(info, fine_cal, ignore_ref):
"""Replace sensor geometry information and reorder cal_chs."""
info_to_cal, fine_cal, ch_names = _prep_fine_cal(info, fine_cal)
grad_picks = pick_types(info, meg='grad', exclude=())
mag_picks = pick_types(info, meg='mag', exclude=())
# Determine gradiometer imbalances and magnetometer calibrations
grad_imbalances = np.array([fine_cal['imb_cals'][info_to_cal[gi]]
for gi in grad_picks]).T
if grad_imbalances.shape[0] not in [0, 1, 3]:
raise ValueError('Must have 1 (x) or 3 (x, y, z) point-like ' +
'magnetometers. Currently have %i' %
grad_imbalances.shape[0])
mag_cals = np.array([fine_cal['imb_cals'][info_to_cal[mi]]
for mi in mag_picks])
# Now let's actually construct our point-like adjustment coils for grads
grad_coilsets = _get_grad_point_coilsets(
info, n_types=len(grad_imbalances), ignore_ref=ignore_ref)
calibration = dict(grad_imbalances=grad_imbalances,
grad_coilsets=grad_coilsets, mag_cals=mag_cals)
# Replace sensor locations (and track differences) for fine calibration
ang_shift = list()
used = np.zeros(len(info['chs']), bool)
cal_corrs = list()
cal_chans = list()
adjust_logged = False
for oi, ci in info_to_cal.items():
assert not used[oi]
used[oi] = True
info_ch = info['chs'][oi]
ch_num = int(fine_cal['ch_names'][ci].lstrip('MEG').lstrip('0'))
cal_chans.append([ch_num, info_ch['coil_type']])
# Some .dat files might only rotate EZ, so we must check first that
# EX and EY are orthogonal to EZ. If not, we find the rotation between
# the original and fine-cal ez, and rotate EX and EY accordingly:
ch_coil_rot = _loc_to_coil_trans(info_ch['loc'])[:3, :3]
cal_loc = fine_cal['locs'][ci].copy()
cal_coil_rot = _loc_to_coil_trans(cal_loc)[:3, :3]
if np.max([np.abs(np.dot(cal_coil_rot[:, ii], cal_coil_rot[:, 2]))
for ii in range(2)]) > 1e-6: # X or Y not orthogonal
if not adjust_logged:
logger.info(' Adjusting non-orthogonal EX and EY')
adjust_logged = True
# find the rotation matrix that goes from one to the other
this_trans = _find_vector_rotation(
ch_coil_rot[:, 2], cal_coil_rot[:, 2])
cal_loc[3:] = np.dot(this_trans, ch_coil_rot).T.ravel()
# calculate shift angle
v1 = _loc_to_coil_trans(cal_loc)[:3, :3]
_normalize_vectors(v1)
v2 = _loc_to_coil_trans(info_ch['loc'])[:3, :3]
_normalize_vectors(v2)
ang_shift.append(np.sum(v1 * v2, axis=0))
if oi in grad_picks:
extra = [1., fine_cal['imb_cals'][ci][0]]
else:
extra = [fine_cal['imb_cals'][ci][0], 0.]
cal_corrs.append(np.concatenate([extra, cal_loc]))
# Adjust channel normal orientations with those from fine calibration
# Channel positions are not changed
info_ch['loc'][3:] = cal_loc[3:]
assert (info_ch['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)
meg_picks = pick_types(info, meg=True, exclude=())
assert used[meg_picks].all()
assert not used[np.setdiff1d(np.arange(len(used)), meg_picks)].any()
# This gets written to the Info struct
sss_cal = dict(cal_corrs=np.array(cal_corrs),
cal_chans=np.array(cal_chans))
# Log quantification of sensor changes
# Deal with numerical precision giving absolute vals slightly more than 1.
ang_shift = np.array(ang_shift)
np.clip(ang_shift, -1., 1., ang_shift)
np.rad2deg(np.arccos(ang_shift), ang_shift) # Convert to degrees
logger.info(' Adjusted coil positions by (μ ± σ): '
'%0.1f° ± %0.1f° (max: %0.1f°)' %
(np.mean(ang_shift), np.std(ang_shift),
np.max(np.abs(ang_shift))))
return calibration, sss_cal
def _get_grad_point_coilsets(info, n_types, ignore_ref):
"""Get point-type coilsets for gradiometers."""
_rotations = dict(
x=np.array([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1.]]),
y=np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1.]]),
z=np.eye(4))
grad_coilsets = list()
grad_picks = pick_types(info, meg='grad', exclude=[])
if len(grad_picks) == 0:
return grad_coilsets
grad_info = pick_info(_simplify_info(info), grad_picks)
# Coil_type values for x, y, z point magnetometers
# Note: 1D correction files only have x-direction corrections
for ch in grad_info['chs']:
ch['coil_type'] = FIFF.FIFFV_COIL_POINT_MAGNETOMETER
orig_locs = [ch['loc'].copy() for ch in grad_info['chs']]
for rot in 'xyz'[:n_types]:
# Rotate the Z magnetometer orientation to the destination orientation
for ci, ch in enumerate(grad_info['chs']):
ch['loc'][3:] = _coil_trans_to_loc(np.dot(
_loc_to_coil_trans(orig_locs[ci]),
_rotations[rot]))[3:]
grad_coilsets.append(_prep_mf_coils(grad_info, ignore_ref))
return grad_coilsets
def _sss_basis_point(exp, trans, cal, ignore_ref=False, mag_scale=100.):
"""Compute multipolar moments for point-like mags (in fine cal)."""
# Loop over all coordinate directions desired and create point mags
S_tot = 0.
# These are magnetometers, so use a uniform coil_scale of 100.
this_cs = np.array([mag_scale], float)
for imb, coils in zip(cal['grad_imbalances'], cal['grad_coilsets']):
S_add = _trans_sss_basis(exp, coils, trans, this_cs)
# Scale spaces by gradiometer imbalance
S_add *= imb[:, np.newaxis]
S_tot += S_add
# Return point-like mag bases
return S_tot
def _regularize_out(int_order, ext_order, mag_or_fine, extended_remove):
"""Regularize out components based on norm."""
n_in = _get_n_moments(int_order)
remove_homog = ext_order > 0 and not mag_or_fine.any()
return list(range(n_in, n_in + 3 * remove_homog)) + extended_remove
def _regularize_in(int_order, ext_order, S_decomp, mag_or_fine,
extended_remove):
"""Regularize basis set using idealized SNR measure."""
n_in, n_out = _get_n_moments([int_order, ext_order])
# The "signal" terms depend only on the inner expansion order
# (i.e., not sensor geometry or head position / expansion origin)
a_lm_sq, rho_i = _compute_sphere_activation_in(
np.arange(int_order + 1))
degrees, orders = _get_degrees_orders(int_order)
a_lm_sq = a_lm_sq[degrees]
I_tots = np.zeros(n_in) # we might not traverse all, so use np.zeros
in_keepers = list(range(n_in))
out_removes = _regularize_out(int_order, ext_order, mag_or_fine,
extended_remove)
out_keepers = list(np.setdiff1d(np.arange(n_in, S_decomp.shape[1]),
out_removes))
remove_order = []
S_decomp = S_decomp.copy()
use_norm = np.sqrt(np.sum(S_decomp * S_decomp, axis=0))
S_decomp /= use_norm
eigs = np.zeros((n_in, 2))
# plot = False # for debugging
# if plot:
# import matplotlib.pyplot as plt
# fig, axs = plt.subplots(3, figsize=[6, 12])
# plot_ord = np.empty(n_in, int)
# plot_ord.fill(-1)
# count = 0
# # Reorder plot to match MF
# for degree in range(1, int_order + 1):
# for order in range(0, degree + 1):
# assert plot_ord[count] == -1
# plot_ord[count] = _deg_ord_idx(degree, order)
# count += 1
# if order > 0:
# assert plot_ord[count] == -1
# plot_ord[count] = _deg_ord_idx(degree, -order)
# count += 1
# assert count == n_in
# assert (plot_ord >= 0).all()
# assert len(np.unique(plot_ord)) == n_in
noise_lev = 5e-13 # noise level in T/m
noise_lev *= noise_lev # effectively what would happen by earlier multiply
for ii in range(n_in):
this_S = S_decomp.take(in_keepers + out_keepers, axis=1)
u, s, v = _safe_svd(this_S, full_matrices=False, **check_disable)
del this_S
eigs[ii] = s[[0, -1]]
v = v.T[:len(in_keepers)]
v /= use_norm[in_keepers][:, np.newaxis]
eta_lm_sq = np.dot(v * 1. / s, u.T)
del u, s, v
eta_lm_sq *= eta_lm_sq
eta_lm_sq = eta_lm_sq.sum(axis=1)
eta_lm_sq *= noise_lev
# Mysterious scale factors to match MF, likely due to differences
# in the basis normalizations...
eta_lm_sq[orders[in_keepers] == 0] *= 2
eta_lm_sq *= 0.0025
snr = a_lm_sq[in_keepers] / eta_lm_sq
I_tots[ii] = 0.5 * np.log2(snr + 1.).sum()
remove_order.append(in_keepers[np.argmin(snr)])
in_keepers.pop(in_keepers.index(remove_order[-1]))
# heuristic to quit if we're past the peak to save cycles
if ii > 10 and (I_tots[ii - 1:ii + 1] < 0.95 * I_tots.max()).all():
break
# if plot and ii == 0:
# axs[0].semilogy(snr[plot_ord[in_keepers]], color='k')
# if plot:
# axs[0].set(ylabel='SNR', ylim=[0.1, 500], xlabel='Component')
# axs[1].plot(I_tots)
# axs[1].set(ylabel='Information', xlabel='Iteration')
# axs[2].plot(eigs[:, 0] / eigs[:, 1])
# axs[2].set(ylabel='Condition', xlabel='Iteration')
# Pick the components that give at least 98% of max info
# This is done because the curves can be quite flat, and we err on the
# side of including rather than excluding components
max_info = np.max(I_tots)
lim_idx = np.where(I_tots >= 0.98 * max_info)[0][0]
in_removes = remove_order[:lim_idx]
for ii, ri in enumerate(in_removes):
logger.debug(' Condition %0.3f/%0.3f = %03.1f, '
'Removing in component %s: l=%s, m=%+0.0f'
% (tuple(eigs[ii]) + (eigs[ii, 0] / eigs[ii, 1],
ri, degrees[ri], orders[ri])))
logger.debug(' Resulting information: %0.1f bits/sample '
'(%0.1f%% of peak %0.1f)'
% (I_tots[lim_idx], 100 * I_tots[lim_idx] / max_info,
max_info))
return in_removes, out_removes
def _compute_sphere_activation_in(degrees):
u"""Compute the "in" power from random currents in a sphere.
Parameters
----------
degrees : ndarray
The degrees to evaluate.
Returns
-------
a_power : ndarray
        The power (a_lm squared) associated with the given degrees (see
:footcite:`KnuutilaEtAl1993`).
rho_i : float
The current density.
References
----------
.. footbibliography::
"""
r_in = 0.080 # radius of the randomly-activated sphere
# set the observation point r=r_s, az=el=0, so we can just look at m=0 term
# compute the resulting current density rho_i
# This is the "surface" version of the equation:
# b_r_in = 100e-15 # fixed radial field amplitude at distance r_s = 100 fT
# r_s = 0.13 # 5 cm from the surface
# rho_degrees = np.arange(1, 100)
# in_sum = (rho_degrees * (rho_degrees + 1.) /
# ((2. * rho_degrees + 1.)) *
# (r_in / r_s) ** (2 * rho_degrees + 2)).sum() * 4. * np.pi
# rho_i = b_r_in * 1e7 / np.sqrt(in_sum)
# rho_i = 5.21334885574e-07 # value for r_s = 0.125
rho_i = 5.91107375632e-07 # deterministic from above, so just store it
a_power = _sq(rho_i) * (degrees * r_in ** (2 * degrees + 4) /
(_sq(2. * degrees + 1.) *
(degrees + 1.)))
return a_power, rho_i
def _trans_sss_basis(exp, all_coils, trans=None, coil_scale=100.):
"""Compute SSS basis (optionally) using a dev<->head trans."""
if trans is not None:
if not isinstance(trans, Transform):
trans = Transform('meg', 'head', trans)
assert not np.isnan(trans['trans']).any()
all_coils = (apply_trans(trans, all_coils[0]),
apply_trans(trans, all_coils[1], move=False),
) + all_coils[2:]
if not isinstance(coil_scale, np.ndarray):
# Scale all magnetometers (with `coil_class` == 1.0) by `mag_scale`
cs = coil_scale
coil_scale = np.ones((all_coils[3], 1))
coil_scale[all_coils[4]] = cs
S_tot = _sss_basis(exp, all_coils)
S_tot *= coil_scale
return S_tot
# intentionally omitted: st_duration, st_correlation, destination, st_fixed,
# st_only
@verbose
def find_bad_channels_maxwell(
raw, limit=7., duration=5., min_count=5, return_scores=False,
origin='auto', int_order=8, ext_order=3, calibration=None,
cross_talk=None, coord_frame='head', regularize='in', ignore_ref=False,
bad_condition='error', head_pos=None, mag_scale=100.,
skip_by_annotation=('edge', 'bad_acq_skip'), h_freq=40.0,
extended_proj=(), verbose=None):
r"""Find bad channels using Maxwell filtering.
Parameters
----------
raw : instance of Raw
Raw data to process.
limit : float
Detection limit for noisy segments (default is 7.). Smaller values will
find more bad channels at increased risk of including good ones. This
value can be interpreted as the standard score of differences between
the original and Maxwell-filtered data. See the ``Notes`` section for
details.
.. note:: This setting only concerns *noisy* channel detection.
The limit for *flat* channel detection currently cannot be
controlled by the user. Flat channel detection is always run
before noisy channel detection.
duration : float
Duration of the segments into which to slice the data for processing,
in seconds. Default is 5.
min_count : int
Minimum number of times a channel must show up as bad in a chunk.
Default is 5.
return_scores : bool
If ``True``, return a dictionary with scoring information for each
evaluated segment of the data. Default is ``False``.
.. warning:: This feature is experimental and may change in a future
version of MNE-Python without prior notice. Please
report any problems and enhancement proposals to the
developers.
.. versionadded:: 0.21
%(maxwell_origin)s
%(maxwell_int)s
%(maxwell_ext)s
%(maxwell_cal)s
%(maxwell_cross)s
%(maxwell_coord)s
%(maxwell_reg)s
%(maxwell_ref)s
%(maxwell_cond)s
%(maxwell_pos)s
%(maxwell_mag)s
%(maxwell_skip)s
h_freq : float | None
The cutoff frequency (in Hz) of the low-pass filter that will be
applied before processing the data. This defaults to ``40.``, which
should provide similar results to MaxFilter. If you do not wish to
apply a filter, set this to ``None``.
%(maxwell_extended)s
%(verbose)s
Returns
-------
noisy_chs : list
List of bad MEG channels that were automatically detected as being
noisy among the good MEG channels.
flat_chs : list
List of MEG channels that were detected as being flat in at least
``min_count`` segments.
scores : dict
A dictionary with information produced by the scoring algorithms.
Only returned when ``return_scores`` is ``True``. It contains the
following keys:
- ``ch_names`` : ndarray, shape (n_meg,)
The names of the MEG channels. Their order corresponds to the
order of rows in the ``scores`` and ``limits`` arrays.
- ``ch_types`` : ndarray, shape (n_meg,)
The types of the MEG channels in ``ch_names`` (``'mag'``,
``'grad'``).
- ``bins`` : ndarray, shape (n_windows, 2)
The inclusive window boundaries (start and stop; in seconds) used
to calculate the scores.
- ``scores_flat`` : ndarray, shape (n_meg, n_windows)
The scores for testing whether MEG channels are flat. These values
correspond to the standard deviation of a segment.
See the ``Notes`` section for details.
- ``limits_flat`` : ndarray, shape (n_meg, 1)
The score thresholds (in standard deviation) above which a segment
was classified as "flat".
- ``scores_noisy`` : ndarray, shape (n_meg, n_windows)
The scores for testing whether MEG channels are noisy. These values
correspond to the standard score of a segment.
See the ``Notes`` section for details.
- ``limits_noisy`` : ndarray, shape (n_meg, 1)
The score thresholds (in standard scores) above which a segment was
classified as "noisy".
.. note:: The scores and limits for channels marked as ``bad`` in the
input data will be set to ``np.nan``.
See Also
--------
annotate_flat
maxwell_filter
Notes
-----
All arguments after ``raw``, ``limit``, ``duration``, ``min_count``, and
``return_scores`` are the same as :func:`~maxwell_filter`, except that the
following are not allowed in this function because they are unused:
``st_duration``, ``st_correlation``, ``destination``, ``st_fixed``, and
``st_only``.
This algorithm, for a given chunk of data:
1. Runs SSS on the data, without removing external components.
2. Excludes channels as *flat* that have had low variability
(standard deviation < 0.01 fT or fT/cm in a 30 ms window) in the given
or any previous chunk.
3. For each channel :math:`k`, computes the *range* or peak-to-peak
:math:`d_k` of the difference between the reconstructed and original
data.
4. Computes the average :math:`\mu_d` and standard deviation
:math:`\sigma_d` of the differences (after scaling magnetometer data
to roughly match the scale of the gradiometer data using ``mag_scale``).
5. Marks channels as bad for the chunk when
:math:`d_k > \mu_d + \textrm{limit} \times \sigma_d`. Note that this
expression can be easily transformed into
:math:`(d_k - \mu_d) / \sigma_d > \textrm{limit}`, which is equivalent
to :math:`z(d_k) > \textrm{limit}`, with :math:`z(d_k)` being the
standard or z-score of the difference.
Data are processed in chunks of the given ``duration``, and channels that
are bad for at least ``min_count`` chunks are returned.
Channels marked as *flat* in step 2 are excluded from all subsequent steps
of noisy channel detection.
    This algorithm gives results similar to, but not identical with,
    MaxFilter. Differences arise because MaxFilter processes data on a
    buffer-by-buffer basis (using buffer-size-dependent downsampling logic)
    and uses different filtering characteristics, among possibly other
    factors.
Channels that are near the ``limit`` for a given ``min_count`` are
particularly susceptible to being different between the two
implementations.
.. versionadded:: 0.20
"""
if h_freq is not None:
if raw.info.get('lowpass') and raw.info['lowpass'] < h_freq:
msg = (f'The input data has already been low-pass filtered with a '
f'{raw.info["lowpass"]} Hz cutoff frequency, which is '
f'below the requested cutoff of {h_freq} Hz. Not applying '
f'low-pass filter.')
logger.info(msg)
else:
logger.info(f'Applying low-pass filter with {h_freq} Hz cutoff '
f'frequency ...')
raw = raw.copy().load_data().filter(l_freq=None, h_freq=h_freq)
limit = float(limit)
onsets, ends = _annotations_starts_stops(
raw, skip_by_annotation, invert=True)
del skip_by_annotation
# operate on chunks
starts = list()
stops = list()
step = int(round(raw.info['sfreq'] * duration))
for onset, end in zip(onsets, ends):
if end - onset >= step:
ss = np.arange(onset, end - step + 1, step)
starts.extend(ss)
ss = ss + step
ss[-1] = end
stops.extend(ss)
min_count = min(_ensure_int(min_count, 'min_count'), len(starts))
logger.info('Scanning for bad channels in %d interval%s (%0.1f sec) ...'
% (len(starts), _pl(starts), step / raw.info['sfreq']))
params = _prep_maxwell_filter(
raw, skip_by_annotation=[], # already accounted for
origin=origin, int_order=int_order, ext_order=ext_order,
calibration=calibration, cross_talk=cross_talk,
coord_frame=coord_frame, regularize=regularize,
ignore_ref=ignore_ref, bad_condition=bad_condition, head_pos=head_pos,
mag_scale=mag_scale, extended_proj=extended_proj)
del origin, int_order, ext_order, calibration, cross_talk, coord_frame
del regularize, ignore_ref, bad_condition, head_pos, mag_scale
good_meg_picks = params['meg_picks'][params['good_mask']]
assert len(params['meg_picks']) == len(params['coil_scale'])
assert len(params['good_mask']) == len(params['meg_picks'])
noisy_chs = Counter()
flat_chs = Counter()
flat_limits = dict(grad=0.01e-13, mag=0.01e-15)
these_limits = np.array([
flat_limits['grad']
if pick in params['grad_picks'] else
flat_limits['mag']
for pick in good_meg_picks])
flat_step = max(20, int(30 * raw.info['sfreq'] / 1000.))
all_flats = set()
# Prepare variables to return if `return_scores=True`.
bins = np.empty((len(starts), 2)) # To store start, stop of each segment
# We create ndarrays with one row per channel, regardless of channel type
# and whether the channel has been marked as "bad" in info or not. This
# makes indexing in the loop easier. We only filter this down to the subset
# of MEG channels after all processing is done.
ch_names = np.array(raw.ch_names)
ch_types = np.array(raw.get_channel_types())
scores_flat = np.full((len(ch_names), len(starts)), np.nan)
scores_noisy = np.full_like(scores_flat, fill_value=np.nan)
thresh_flat = np.full((len(ch_names), 1), np.nan)
thresh_noisy = np.full_like(thresh_flat, fill_value=np.nan)
for si, (start, stop) in enumerate(zip(starts, stops)):
n_iter = 0
orig_data = raw.get_data(None, start, stop, verbose=False)
chunk_raw = RawArray(
orig_data, params['info'],
first_samp=raw.first_samp + start, copy='data', verbose=False)
t = chunk_raw.times[[0, -1]] + start / raw.info['sfreq']
logger.info(' Interval %3d: %8.3f - %8.3f'
% ((si + 1,) + tuple(t[[0, -1]])))
        # Flat pass: SD < 0.01 fT/cm or 0.01 fT for at least 30 ms (20 samples)
n = stop - start
flat_stop = n - (n % flat_step)
data = chunk_raw.get_data(good_meg_picks, 0, flat_stop)
data.shape = (data.shape[0], -1, flat_step)
delta = np.std(data, axis=-1).min(-1) # min std across segments
# We may want to return this later if `return_scores=True`.
bins[si, :] = t[0], t[-1]
scores_flat[good_meg_picks, si] = delta
thresh_flat[good_meg_picks] = these_limits.reshape(-1, 1)
chunk_flats = delta < these_limits
chunk_flats = np.where(chunk_flats)[0]
chunk_flats = [raw.ch_names[good_meg_picks[chunk_flat]]
for chunk_flat in chunk_flats]
flat_chs.update(chunk_flats)
all_flats |= set(chunk_flats)
chunk_flats = sorted(all_flats)
these_picks = [pick for pick in good_meg_picks
if raw.ch_names[pick] not in chunk_flats]
# Bad pass
chunk_noisy = list()
params['st_duration'] = int(round(
chunk_raw.times[-1] * raw.info['sfreq']))
for n_iter in range(1, 101): # iteratively exclude the worst ones
assert set(raw.info['bads']) & set(chunk_noisy) == set()
params['good_mask'][:] = [
chunk_raw.ch_names[pick] not in
raw.info['bads'] + chunk_noisy + chunk_flats
for pick in params['meg_picks']]
chunk_raw._data[:] = orig_data
delta = chunk_raw.get_data(these_picks)
with use_log_level(False):
_run_maxwell_filter(
chunk_raw, reconstruct='orig', copy=False, **params)
if n_iter == 1 and len(chunk_flats):
logger.info(' Flat (%2d): %s'
% (len(chunk_flats), ' '.join(chunk_flats)))
delta -= chunk_raw.get_data(these_picks)
# p2p
range_ = np.ptp(delta, axis=-1)
cs_picks = np.searchsorted(params['meg_picks'], these_picks)
range_ *= params['coil_scale'][cs_picks, 0]
mean, std = np.mean(range_), np.std(range_)
# z score
z = (range_ - mean) / std
idx = np.argmax(z)
max_ = z[idx]
# We may want to return this later if `return_scores=True`.
scores_noisy[these_picks, si] = z
thresh_noisy[these_picks] = limit
if max_ < limit:
break
name = raw.ch_names[these_picks[idx]]
logger.debug(' Bad: %s %0.1f'
% (name, max_))
these_picks.pop(idx)
chunk_noisy.append(name)
noisy_chs.update(chunk_noisy)
noisy_chs = sorted((b for b, c in noisy_chs.items() if c >= min_count),
key=lambda x: raw.ch_names.index(x))
flat_chs = sorted((f for f, c in flat_chs.items() if c >= min_count),
key=lambda x: raw.ch_names.index(x))
# Only include MEG channels.
ch_names = ch_names[params['meg_picks']]
ch_types = ch_types[params['meg_picks']]
scores_flat = scores_flat[params['meg_picks']]
thresh_flat = thresh_flat[params['meg_picks']]
scores_noisy = scores_noisy[params['meg_picks']]
thresh_noisy = thresh_noisy[params['meg_picks']]
logger.info(' Static bad channels: %s' % (noisy_chs,))
logger.info(' Static flat channels: %s' % (flat_chs,))
logger.info('[done]')
if return_scores:
scores = dict(ch_names=ch_names,
ch_types=ch_types,
bins=bins,
scores_flat=scores_flat,
limits_flat=thresh_flat,
scores_noisy=scores_noisy,
limits_noisy=thresh_noisy)
return noisy_chs, flat_chs, scores
else:
return noisy_chs, flat_chs
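# A minimal usage sketch (hedged: assumes ``raw`` is an MEG Raw instance loaded
# elsewhere, e.g. with mne.io.read_raw_fif, and keeps the default parameters):
#
#     noisy_chs, flat_chs = find_bad_channels_maxwell(raw, limit=7.)
#     raw.info['bads'] += noisy_chs + flat_chs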
def _read_cross_talk(cross_talk, ch_names):
sss_ctc = dict()
ctc = None
if cross_talk is not None:
sss_ctc = _read_ctc(cross_talk)
ctc_chs = sss_ctc['proj_items_chs']
# checking for extra space ambiguity in channel names
# between old and new fif files
if ch_names[0] not in ctc_chs:
ctc_chs = _clean_names(ctc_chs, remove_whitespace=True)
ch_names = _clean_names(ch_names, remove_whitespace=True)
missing = sorted(list(set(ch_names) - set(ctc_chs)))
if len(missing) != 0:
raise RuntimeError('Missing MEG channels in cross-talk matrix:\n%s'
% missing)
missing = sorted(list(set(ctc_chs) - set(ch_names)))
if len(missing) > 0:
warn('Not all cross-talk channels in raw:\n%s' % missing)
ctc_picks = [ctc_chs.index(name) for name in ch_names]
ctc = sss_ctc['decoupler'][ctc_picks][:, ctc_picks]
# I have no idea why, but MF transposes this for storage..
sss_ctc['decoupler'] = sss_ctc['decoupler'].T.tocsc()
return ctc, sss_ctc
@verbose
def compute_maxwell_basis(info, origin='auto', int_order=8, ext_order=3,
calibration=None, coord_frame='head',
regularize='in', ignore_ref=True,
bad_condition='error', mag_scale=100.,
extended_proj=(), verbose=None):
r"""Compute the SSS basis for a given measurement info structure.
Parameters
----------
info : instance of Info
The measurement info.
%(maxwell_origin)s
%(maxwell_int)s
%(maxwell_ext)s
%(maxwell_cal)s
%(maxwell_coord)s
%(maxwell_reg)s
%(maxwell_ref)s
%(maxwell_cond)s
%(maxwell_mag)s
%(maxwell_extended)s
%(verbose)s
Returns
-------
S : ndarray, shape (n_meg, n_moments)
The basis that can be used to reconstruct the data.
pS : ndarray, shape (n_moments, n_good_meg)
The (stabilized) pseudoinverse of the S array.
reg_moments : ndarray, shape (n_moments,)
The moments that were kept after regularization.
n_use_in : int
The number of kept moments that were in the internal space.
Notes
-----
This outputs variants of :math:`\mathbf{S}` and :math:`\mathbf{S^\dagger}`
from equations 27 and 37 of :footcite:`TauluKajola2005` with the coil scale
for magnetometers already factored in so that the resulting denoising
transform of the data to obtain :math:`\hat{\phi}_{in}` from equation
38 would be::
phi_in = S[:, :n_use_in] @ pS[:n_use_in] @ data_meg_good
.. versionadded:: 0.23
References
----------
.. footbibliography::
"""
from ..io import RawArray
_validate_type(info, Info, 'info')
raw = RawArray(np.zeros((len(info['ch_names']), 1)), info.copy(),
verbose=False)
logger.info('Computing Maxwell basis')
params = _prep_maxwell_filter(
raw=raw, origin=origin, int_order=int_order, ext_order=ext_order,
calibration=calibration, coord_frame=coord_frame, destination=None,
regularize=regularize, ignore_ref=ignore_ref,
bad_condition=bad_condition, mag_scale=mag_scale,
extended_proj=extended_proj)
_, S_decomp_full, pS_decomp, reg_moments, n_use_in = \
params['_get_this_decomp_trans'](info['dev_head_t'], t=0.)
return S_decomp_full, pS_decomp, reg_moments, n_use_in
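# Usage sketch based on the Notes above (hedged: ``data_meg_good`` is assumed
# to be the good-MEG-channel data matrix, ordered consistently with ``info``):
#
#     S, pS, reg_moments, n_use_in = compute_maxwell_basis(info)
#     phi_in = S[:, :n_use_in] @ pS[:n_use_in] @ data_meg_good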
| bsd-3-clause |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/delaunay/interpolate.py | 21 | 7262 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid
from matplotlib._delaunay import nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
    key -- (slice(ystart, ystop, ystep), slice(xstart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
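# Worked example (mirrors numpy.mgrid semantics): a key of
# (slice(0, 1, 5j), slice(-1, 1, 3j)), i.e. [0:1:5j, -1:1:3j], yields
#
#     slice2gridspec((slice(0, 1, 5j), slice(-1, 1, 3j)))
#     # -> (-1, 1, 3, 0, 1, 5)  (x0, x1, xstep, y0, y1, ystep)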
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, the only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
    Given the Delaunay triangulation (or indeed *any* complete triangulation)
we can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point
of the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(
x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
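# Usage sketch (hedged: assumes ``tri`` is a Triangulation built from scattered
# x, y points and ``z`` holds the function values at those points):
#
#     interp = LinearInterpolator(tri, z, default_value=0.0)
#     vals = interp[0:1:100j, 0:1:100j]  # (100, 100) interpolated grid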
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, the only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also
construct what is called the Voronoi diagram from a Delaunay triangulation
by connecting the circumcenters of the triangles to those of their
    neighbors to form a tessellation of irregular polygons covering the plane
and containing only one node from the triangulation. Each point in one
node's Voronoi polygon is closer to that node than any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors
of this point as the set of nodes participating in Delaunay triangles
whose circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
polygons. The new Voronoi diagram would have a polygon around the
inserted point. This polygon would "steal" area from the original Voronoi
polygons. For each node i in the natural neighbors set, we compute the
area stolen from its original Voronoi polygon, stolen[i]. We define the
natural neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
The interpolated surface is C1-continuous except at the nodes themselves
across the convex hull of the input points. One can find the set of points
that a given node will affect by computing the union of the areas covered
by the circumcircles of each Delaunay triangle that node participates in.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(
x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
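# Usage sketch (hedged: same assumptions on ``tri`` and ``z`` as above; xi and
# yi are arbitrary query coordinates):
#
#     nn = NNInterpolator(tri, z)
#     grid = nn[0:1:100j, 0:1:100j]  # regular grid, like numpy.mgrid
#     vals = nn(xi, yi)              # unstructured points via __call__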
| gpl-2.0 |
mikofski/pvlib-python | pvlib/tests/test_inverter.py | 1 | 7415 | import numpy as np
import pandas as pd
from conftest import assert_series_equal
from numpy.testing import assert_allclose
from conftest import DATA_DIR
import pytest
from pvlib import inverter
def test_adr(adr_inverter_parameters):
vdcs = pd.Series([135, 154, 390, 420, 551])
pdcs = pd.Series([135, 1232, 1170, 420, 551])
pacs = inverter.adr(vdcs, pdcs, adr_inverter_parameters)
assert_series_equal(pacs, pd.Series([np.nan, 1161.5745, 1116.4459,
382.6679, np.nan]))
def test_adr_vtol(adr_inverter_parameters):
vdcs = pd.Series([135, 154, 390, 420, 551])
pdcs = pd.Series([135, 1232, 1170, 420, 551])
pacs = inverter.adr(vdcs, pdcs, adr_inverter_parameters, vtol=0.20)
assert_series_equal(pacs, pd.Series([104.8223, 1161.5745, 1116.4459,
382.6679, 513.3385]))
def test_adr_float(adr_inverter_parameters):
vdcs = 154.
pdcs = 1232.
pacs = inverter.adr(vdcs, pdcs, adr_inverter_parameters)
assert_allclose(pacs, 1161.5745)
def test_adr_invalid_and_night(sam_data):
# also tests if inverter.adr can read the output from pvsystem.retrieve_sam
inverters = sam_data['adrinverter']
testinv = 'Zigor__Sunzet_3_TL_US_240V__CEC_2011_'
vdcs = np.array([39.873036, 0., np.nan, 420])
pdcs = np.array([188.09182, 0., 420, np.nan])
pacs = inverter.adr(vdcs, pdcs, inverters[testinv])
assert_allclose(pacs, np.array([np.nan, -0.25, np.nan, np.nan]))
def test_sandia(cec_inverter_parameters):
vdcs = pd.Series(np.linspace(0, 50, 3))
idcs = pd.Series(np.linspace(0, 11, 3))
pdcs = idcs * vdcs
pacs = inverter.sandia(vdcs, pdcs, cec_inverter_parameters)
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
def test_sandia_float(cec_inverter_parameters):
vdcs = 25.
idcs = 5.5
pdcs = idcs * vdcs
pacs = inverter.sandia(vdcs, pdcs, cec_inverter_parameters)
assert_allclose(pacs, 132.004278, 5)
# test at low power condition
vdcs = 25.
idcs = 0
pdcs = idcs * vdcs
pacs = inverter.sandia(vdcs, pdcs, cec_inverter_parameters)
assert_allclose(pacs, -1. * cec_inverter_parameters['Pnt'], 5)
def test_sandia_Pnt_micro():
"""
Test for issue #140, where some microinverters were giving a positive AC
power output when the DC power was 0.
"""
inverter_parameters = {
'Name': 'Enphase Energy: M250-60-2LL-S2x (-ZC) (-NA) 208V [CEC 2013]',
'Vac': 208.0,
'Paco': 240.0,
'Pdco': 250.5311318,
'Vdco': 32.06160667,
'Pso': 1.12048857,
'C0': -5.76E-05,
'C1': -6.24E-04,
'C2': 8.09E-02,
'C3': -0.111781106,
'Pnt': 0.043,
'Vdcmax': 48.0,
'Idcmax': 9.8,
'Mppt_low': 27.0,
'Mppt_high': 39.0,
}
vdcs = pd.Series(np.linspace(0, 50, 3))
idcs = pd.Series(np.linspace(0, 11, 3))
pdcs = idcs * vdcs
pacs = inverter.sandia(vdcs, pdcs, inverter_parameters)
assert_series_equal(pacs, pd.Series([-0.043, 132.545914746, 240.0]))
def test_sandia_multi(cec_inverter_parameters):
vdcs = pd.Series(np.linspace(0, 50, 3))
idcs = pd.Series(np.linspace(0, 11, 3)) / 2
pdcs = idcs * vdcs
pacs = inverter.sandia_multi((vdcs, vdcs), (pdcs, pdcs),
cec_inverter_parameters)
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
# with lists instead of tuples
pacs = inverter.sandia_multi([vdcs, vdcs], [pdcs, pdcs],
cec_inverter_parameters)
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
# with arrays instead of tuples
pacs = inverter.sandia_multi(np.array([vdcs, vdcs]),
np.array([pdcs, pdcs]),
cec_inverter_parameters)
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
def test_sandia_multi_length_error(cec_inverter_parameters):
vdcs = pd.Series(np.linspace(0, 50, 3))
idcs = pd.Series(np.linspace(0, 11, 3))
pdcs = idcs * vdcs
with pytest.raises(ValueError, match='p_dc and v_dc have different'):
inverter.sandia_multi((vdcs,), (pdcs, pdcs), cec_inverter_parameters)
def test_sandia_multi_array(cec_inverter_parameters):
vdcs = np.linspace(0, 50, 3)
idcs = np.linspace(0, 11, 3)
pdcs = idcs * vdcs
pacs = inverter.sandia_multi((vdcs,), (pdcs,), cec_inverter_parameters)
assert_allclose(pacs, np.array([-0.020000, 132.004278, 250.000000]))
def test_pvwatts_scalars():
expected = 85.58556604752516
out = inverter.pvwatts(90, 100, 0.95)
assert_allclose(out, expected)
# GH 675
expected = 0.
out = inverter.pvwatts(0., 100)
assert_allclose(out, expected)
def test_pvwatts_possible_negative():
# pvwatts could return a negative value for (pdc / pdc0) < 0.006
# unless it is clipped. see GH 541 for more
expected = 0
out = inverter.pvwatts(0.001, 1)
assert_allclose(out, expected)
def test_pvwatts_arrays():
pdc = np.array([[np.nan], [0], [50], [100]])
pdc0 = 100
expected = np.array([[np.nan],
[0.],
[47.60843624],
[95.]])
out = inverter.pvwatts(pdc, pdc0, 0.95)
assert_allclose(out, expected, equal_nan=True)
def test_pvwatts_series():
pdc = pd.Series([np.nan, 0, 50, 100])
pdc0 = 100
expected = pd.Series(np.array([np.nan, 0., 47.608436, 95.]))
out = inverter.pvwatts(pdc, pdc0, 0.95)
assert_series_equal(expected, out)
def test_pvwatts_multi():
pdc = np.array([np.nan, 0, 50, 100]) / 2
pdc0 = 100
expected = np.array([np.nan, 0., 47.608436, 95.])
out = inverter.pvwatts_multi((pdc, pdc), pdc0, 0.95)
assert_allclose(expected, out)
# with 2D array
pdc_2d = np.array([pdc, pdc])
out = inverter.pvwatts_multi(pdc_2d, pdc0, 0.95)
assert_allclose(expected, out)
# with Series
pdc = pd.Series(pdc)
out = inverter.pvwatts_multi((pdc, pdc), pdc0, 0.95)
assert_series_equal(expected, out)
# with list instead of tuple
out = inverter.pvwatts_multi([pdc, pdc], pdc0, 0.95)
assert_series_equal(expected, out)
INVERTER_TEST_MEAS = DATA_DIR / 'inverter_fit_snl_meas.csv'
INVERTER_TEST_SIM = DATA_DIR / 'inverter_fit_snl_sim.csv'
@pytest.mark.parametrize('infilen, expected', [
(INVERTER_TEST_MEAS, {'Paco': 333000., 'Pdco': 343251., 'Vdco': 740.,
'Pso': 1427.746, 'C0': -5.768e-08, 'C1': 3.596e-05,
'C2': 1.038e-03, 'C3': 2.978e-05, 'Pnt': 1.}),
(INVERTER_TEST_SIM, {'Paco': 1000., 'Pdco': 1050., 'Vdco': 240.,
'Pso': 10., 'C0': 1e-6, 'C1': 1e-4, 'C2': 1e-2,
'C3': 1e-3, 'Pnt': 1.}),
])
def test_fit_sandia(infilen, expected):
curves = pd.read_csv(infilen)
dc_power = curves['ac_power'] / curves['efficiency']
result = inverter.fit_sandia(ac_power=curves['ac_power'],
dc_power=dc_power,
dc_voltage=curves['dc_voltage'],
dc_voltage_level=curves['dc_voltage_level'],
p_ac_0=expected['Paco'], p_nt=expected['Pnt'])
assert expected == pytest.approx(result, rel=1e-3)
| bsd-3-clause |
glwagner/py2Periodic | tests/twoDimTurbulence/quantifyTimeStepperError.py | 1 | 1833 | import time, sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../')
from py2Periodic.physics import twoDimTurbulence
from numpy import pi
def many2DTurbulenceSimulations(params, dt, nSteps, timeSteppers, q0):
solutions = {}
# Instantiate and run a bunch of two-dimensional turbulence models.
for timeStepper in timeSteppers:
m = twoDimTurbulence.model(timeStepper = timeStepper,
dt = dt,
**params)
m.set_physical_soln(q0)
m.step_nSteps(nSteps=nSteps)
m.update_state_variables()
solutions[timeStepper] = m.q
return solutions
nSteps0 = 1e2
dtz = [1.0e-3, 2.0e-3, 4.0e-3, 1.0e-2, 2.0e-2, 4.0e-2, 1.0e-1]
timeSteppers = ['forwardEuler', 'RK4', 'RKW3', 'ETDRK4']
params = {
'nx' : 128,
'Lx' : 2.0*pi,
'visc' : 4.0e-4,
'viscOrder' : 4.0,
'nThreads' : 4,
}
# Define initial condition
q0 = np.random.standard_normal((params['nx'], params['nx'], 1))
# Run many simulations
solutions = {}
for dt in dtz:
solutions[dt] = many2DTurbulenceSimulations(
params, dt, int(nSteps0*dtz[-1]/dt), timeSteppers, q0)
# Analyze results
bulkError = {}
answer = solutions[ dtz[-1] ][ 'RK4' ]
for timeStepper in timeSteppers:
bulkError[timeStepper] = np.zeros_like(dtz)
for ii, dt in enumerate(dtz):
errorDensity = np.abs(answer-solutions[dt][timeStepper])**2.0
bulkError[timeStepper][ii] = errorDensity.sum()
# Plot the results
fig = plt.figure(('Bulk error'), figsize=(3, 3)); plt.clf()
plt.plot(dtz, bulkError[timeSteppers[0]], 'ko--')
plt.plot(dtz, bulkError[timeSteppers[1]], 'b^-')
plt.plot(dtz, bulkError[timeSteppers[2]], 'rs--')
plt.plot(dtz, bulkError[timeSteppers[3]], 'gp-')
print("\nClose the figure to end the program")
plt.show()
| mit |
jmmease/pandas | pandas/tests/indexes/datetimes/test_indexing.py | 6 | 10289 | import pytest
import pytz
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
from pandas import notna, Index, DatetimeIndex, datetime, date_range
class TestDatetimeIndex(object):
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_tz(self):
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
result = i.where(notna(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_insert(self):
idx = DatetimeIndex(
['2000-01-04', '2000-01-01', '2000-01-02'], name='idx')
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'], name='idx')
tm.assert_index_equal(result, exp)
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted',
datetime(2000, 1, 1),
datetime(2000, 1, 2)], name='idx')
assert not isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
idx = date_range('1/1/2000', periods=3, freq='M', name='idx')
# preserve freq
expected_0 = DatetimeIndex(['1999-12-31', '2000-01-31', '2000-02-29',
'2000-03-31'], name='idx', freq='M')
expected_3 = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31',
'2000-04-30'], name='idx', freq='M')
# reset freq to None
expected_1_nofreq = DatetimeIndex(['2000-01-31', '2000-01-31',
'2000-02-29',
'2000-03-31'], name='idx',
freq=None)
expected_3_nofreq = DatetimeIndex(['2000-01-31', '2000-02-29',
'2000-03-31',
'2000-01-02'], name='idx',
freq=None)
cases = [(0, datetime(1999, 12, 31), expected_0),
(-3, datetime(1999, 12, 31), expected_0),
(3, datetime(2000, 4, 30), expected_3),
(1, datetime(2000, 1, 31), expected_1_nofreq),
(3, datetime(2000, 1, 2), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
# reset freq to None
result = idx.insert(3, datetime(2000, 1, 2))
expected = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31',
'2000-01-02'], name='idx', freq=None)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq is None
# see gh-7299
idx = date_range('1/1/2000', periods=3, freq='D', tz='Asia/Tokyo',
name='idx')
with pytest.raises(ValueError):
idx.insert(3, pd.Timestamp('2000-01-04'))
with pytest.raises(ValueError):
idx.insert(3, datetime(2000, 1, 4))
with pytest.raises(ValueError):
idx.insert(3, pd.Timestamp('2000-01-04', tz='US/Eastern'))
with pytest.raises(ValueError):
idx.insert(3, datetime(2000, 1, 4,
tzinfo=pytz.timezone('US/Eastern')))
for tz in ['US/Pacific', 'Asia/Singapore']:
idx = date_range('1/1/2000 09:00', periods=6, freq='H', tz=tz,
name='idx')
# preserve freq
expected = date_range('1/1/2000 09:00', periods=7, freq='H', tz=tz,
name='idx')
for d in [pd.Timestamp('2000-01-01 15:00', tz=tz),
pytz.timezone(tz).localize(datetime(2000, 1, 1, 15))]:
result = idx.insert(6, d)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
assert result.tz == expected.tz
expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00',
'2000-01-01 12:00', '2000-01-01 13:00',
'2000-01-01 14:00',
'2000-01-01 10:00'], name='idx',
tz=tz, freq=None)
# reset freq to None
for d in [pd.Timestamp('2000-01-01 10:00', tz=tz),
pytz.timezone(tz).localize(datetime(2000, 1, 1, 10))]:
result = idx.insert(6, d)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.tz == expected.tz
assert result.freq is None
def test_delete(self):
idx = date_range(start='2000-01-01', periods=5, freq='M', name='idx')
        # preserve freq
expected_0 = date_range(start='2000-02-01', periods=4, freq='M',
name='idx')
expected_4 = date_range(start='2000-01-01', periods=4, freq='M',
name='idx')
# reset freq to None
expected_1 = DatetimeIndex(['2000-01-31', '2000-03-31', '2000-04-30',
'2000-05-31'], freq=None, name='idx')
cases = {0: expected_0,
-5: expected_0,
-1: expected_4,
4: expected_4,
1: expected_1}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
with pytest.raises((IndexError, ValueError)):
            # either depending on numpy version
result = idx.delete(5)
for tz in [None, 'Asia/Tokyo', 'US/Pacific']:
idx = date_range(start='2000-01-01 09:00', periods=10, freq='H',
name='idx', tz=tz)
expected = date_range(start='2000-01-01 10:00', periods=9,
freq='H', name='idx', tz=tz)
result = idx.delete(0)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freqstr == 'H'
assert result.tz == expected.tz
expected = date_range(start='2000-01-01 09:00', periods=9,
freq='H', name='idx', tz=tz)
result = idx.delete(-1)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freqstr == 'H'
assert result.tz == expected.tz
def test_delete_slice(self):
idx = date_range(start='2000-01-01', periods=10, freq='D', name='idx')
        # preserve freq
expected_0_2 = date_range(start='2000-01-04', periods=7, freq='D',
name='idx')
expected_7_9 = date_range(start='2000-01-01', periods=7, freq='D',
name='idx')
# reset freq to None
expected_3_5 = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03',
'2000-01-07', '2000-01-08', '2000-01-09',
'2000-01-10'], freq=None, name='idx')
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
result = idx.delete(slice(n[0], n[-1] + 1))
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
for tz in [None, 'Asia/Tokyo', 'US/Pacific']:
ts = pd.Series(1, index=pd.date_range(
'2000-01-01 09:00', periods=10, freq='H', name='idx', tz=tz))
# preserve freq
result = ts.drop(ts.index[:5]).index
expected = pd.date_range('2000-01-01 14:00', periods=5, freq='H',
name='idx', tz=tz)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
assert result.tz == expected.tz
# reset freq to None
result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index
expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 11:00',
'2000-01-01 13:00',
'2000-01-01 15:00', '2000-01-01 17:00'],
freq=None, name='idx', tz=tz)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
assert result.tz == expected.tz
| bsd-3-clause |
janmtl/pypsych | tests/data_sources/eprime/test_eprime.py | 1 | 2866 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_eprime
----------------------------------
Tests for `EPrime` Data Source provided in pypsych.data_sources.eprime module.
"""
import unittest
import pandas as pd
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from pkg_resources import resource_filename
from pypsych.config import Config
from pypsych.schedule import Schedule
from pypsych.data_sources.eprime import EPrime
class EprimeLoading(unittest.TestCase):
"""
Asserts that bad config and schedule yaml files cannot be loaded.
"""
def setUp(self):
# Load a config and a schedule
config = Config(path=resource_filename('tests.config', 'config.yaml'))
config.load()
schedule = Schedule(path=resource_filename('tests.schedule',
'schedule.yaml'))
schedule.load()
schedule.compile('tests/data')
# Extract the configuration and schedule for just one task
self.subconfig = config.get_subconfig('Mock1', 'EPrime')
self.subschedule = schedule.get_subschedule('Mock1', 'EPrime')
self.file_paths = schedule.get_file_paths(101, 'Mock1', 'EPrime')
def test_create_eprime(self):
"""Should not throw errors when correct configuration is loaded."""
eprime = EPrime(config=self.subconfig, schedule=self.subschedule)
# Check that eprime instance matches
self.assertIsInstance(eprime, EPrime)
def test_load_eprime(self):
"""Should not throw errors when correct file is loaded."""
eprime = EPrime(config=self.subconfig, schedule=self.subschedule)
eprime.load(self.file_paths)
class EPrimeBinData(unittest.TestCase):
"""
Tests that EPrime correctly bins data to take statistics.
"""
def setUp(self):
# Load a config and a schedule
config = Config(path=resource_filename('tests.config', 'config.yaml'))
config.load()
schedule = Schedule(path=resource_filename('tests.schedule',
'schedule.yaml'))
schedule.load()
schedule.compile('tests/data')
# Extract the configuration and schedule for just one task
subconfig = config.get_subconfig('Mock1', 'EPrime')
subschedule = schedule.get_subschedule('Mock1', 'EPrime')
file_paths = schedule.get_file_paths(101, 'Mock1', 'EPrime')
# Create an instance of the eprime data source
self.eprime = EPrime(config=subconfig, schedule=subschedule)
self.eprime.load(file_paths)
self.eprime.merge_data()
def test_bin_data(self):
"""Test if stats are being calculated correctly."""
self.eprime.bin_data()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
NTMatBoiseState/FiberFit | src/fiberfit_model/computerVision_BP.py | 1 | 10936 | import sys
import matplotlib
import numpy as np
import scipy
import scipy.ndimage
import scipy.interpolate
import scipy.optimize
import scipy.integrate
import scipy.stats
import math
import time
import glob
from pylab import *
from pandas import DataFrame
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
from src.fiberfit_model.EllipseDirectFit import*
from src.fiberfit_model import helpers
figSize = 4.5
# csfont and ticksfont are used to style the matplotlib figures
csfont = {'fontname':'Times New Roman',
'size':'14',
}
ticksfont = {'fontname':'Times New Roman'}
def process_histogram(PabsFlip, N1, uCut, lCut, angleInc, radStep):
"""
Create orientation Histogram
Sum pixel intensity along different angles
    :param PabsFlip: flipped 2-D power spectrum of the image
    :param N1: number of columns (pixels) in the image
    :param uCut: upper-cut parameter from the settings.SettingsWindow
    :param lCut: lower-cut parameter from the settings.SettingsWindow
    :param angleInc: angle-increment from the settings.SettingsWindow
:param radStep: radial-step
:return:
"""
n1 = np.round(N1 / 2) - 1
freq = np.arange(-n1, n1 + 1, 1)
x, y = freq, freq
# Variables for settings
CO_lower = lCut
CO_upper = uCut
angleInc = angleInc
radStep = radStep
# Set up polar coordinates prior to summing the spectrum
theta1Rad = np.linspace(0.0, 2 * math.pi, num=360/angleInc)
f1 = np.round_(N1 / (2 * CO_lower))
f2 = np.round_(N1 / (2 * CO_upper))
rho1 = np.linspace(f1, f2, num=(f2 - f1)/radStep) # frequency band
PowerX = np.zeros((theta1Rad.size, theta1Rad.size))
PowerY = np.zeros((theta1Rad.size))
    # Interpolate using a Spline
PowerSpline = scipy.interpolate.RectBivariateSpline(y=y, x=x, z=PabsFlip)
n_dx = 0.001
for p in range(0, theta1Rad.size):
# converting theta1Rad and rho1 to cartesian coordinates
xfinal = rho1 * math.cos(theta1Rad[p])
yfinal = rho1 * math.sin(theta1Rad[p])
        # Evaluate the spline on the path
px = PowerSpline.ev(yfinal, xfinal)
PowerY[p] = np.sum(px)
# Only use the data in the first two quadrants (Spectrum is symmetric)
num = len(theta1Rad)
PowerYFinal = PowerY[0:num // 2]
theta1RadFinal = theta1Rad[0:num // 2]
power_area = np.trapz(PowerYFinal, theta1RadFinal)
normPower = PowerYFinal / power_area
# TODO: Ask Rici what those are
return normPower, theta1RadFinal
def process_ellipse(normPower, theta1RadFinal, figWidth, figHeigth, dir, number):
"""
:param normPower:
:param theta1RadFinal:
:param figWidth: width of the figure
:param figHeigth: height of the figure
:param dir: full path to the directory where one wants to store the intermediate images
:param number:
:return:
"""
# Combine data into [XY] to fit to an ellipse
Mirtheta1RadFinal1 = np.concatenate([theta1RadFinal.T, (theta1RadFinal + np.pi).T])
MirnormPower = np.concatenate([normPower.T, normPower.T])
# Convert mirrored polar coords to cartesian coords
xdata, ydata = pol2cart(Mirtheta1RadFinal1, MirnormPower)
ell_data = np.vstack([xdata, ydata])
ell_data = ell_data.T
# Python fitting function, see EllipseDirectFit
A, centroid = EllipseDirectFit(ell_data)
t = orientation(A)
# Plot Lower Left - Polar plot of angular distribution
angDist = plt.figure(figsize=(figWidth, figHeigth)) # Creates a figure containing angular distribution.
r_line = np.arange(0, max(MirnormPower) + .5, .5)
th = np.zeros(len(r_line))
for i in range(0, len(r_line)):
th[i] = t
th = np.concatenate([th, (th + 180)])
r_line = np.concatenate([r_line, r_line])
plt.polar(Mirtheta1RadFinal1, MirnormPower, color ='k', linewidth=2)
plt.polar(th * pi / 180, r_line, color='r', linewidth=3)
if (max(MirnormPower)<2):
inc = 0.5
elif (max(MirnormPower)<5):
inc = 1
elif max(MirnormPower)<20:
inc = 5
else:
inc = 10
plt.yticks(np.arange(inc, max(MirnormPower), inc), **ticksfont)
plt.xticks(**ticksfont)
angDist.savefig(dir+'angDist_' + number.__str__(), bbox_inches='tight')
plt.close()
return t, angDist
def process_kappa(t_final, theta1RadFinal, normPower, figWidth, figHeigth, dir, number):
"""
:param t_final:
:param theta1RadFinal:
:param normPower:
:param figWidth:
:param figHeigth:
:param dir:
:param number:
:return:
"""
t_final_rad = t_final * pi / 180
def fitted_func(thetas, c):
int_value, int_err = scipy.integrate.quadrature(func=lambda x: exp(c * cos(x)), a=0.0, b=np.pi)
return ((np.pi * (1.0 / np.pi * (int_value))) ** - 1) * \
np.exp(c * np.cos(2 * (thetas - t_final_rad)))
c0 = 15
kappa, kappa_pcov = scipy.optimize.curve_fit(f=fitted_func, p0=(c0,), xdata=theta1RadFinal, ydata=normPower)
# Shift data for plotting purposes
t = t_final
diff = abs(theta1RadFinal - (t * pi / 180))
centerLoc = find(diff == min(diff))
num_angles = len(theta1RadFinal)
shift = (round(num_angles / 2) - (num_angles - centerLoc))
normPower1 = np.roll(normPower, -shift)
theta1RadFinal1 = np.roll(theta1RadFinal, -shift)
if (shift > 0):
s = num_angles - shift
for k in range(s, num_angles):
theta1RadFinal1[k] = pi + theta1RadFinal1[k]
elif (shift < 0):
for k in range(0, -shift):
theta1RadFinal1[k] = -pi + theta1RadFinal1[k]
# Plot Lower Right - Distribution on a cartesian plane with appropriate shift
cartDist = plt.figure(figsize=(figWidth, figHeigth)) # Creates a figure containing cartesian distribution.
h2 = plt.bar((theta1RadFinal1 * 180 / pi), normPower1, edgecolor = 'k', color = 'k')
plt.xticks(np.arange(-360, 360, 45,), **ticksfont)
plt.xlim([t - 100, t + 100])
p_act = fitted_func(theta1RadFinal1, kappa)
h3, = plt.plot(theta1RadFinal1 * 180 / pi, p_act, linewidth=3)
#plt.title('Fiber Distribution', **csfont)
plt.xlabel('Angle (°)', **csfont)
plt.ylabel('Normalized Intensity', **csfont)
if (max(normPower)<2):
inc = 0.5
elif (max(normPower)<5):
inc = 1
elif (max(normPower)<20):
inc = 5
else:
inc = 10
plt.yticks(np.arange(0, max(normPower1) + .3, inc), **ticksfont)
plt.ylim([0, max(normPower1) + .3])
cartDist.savefig(dir + 'cartDist_' + number.__str__(), bbox_inches='tight')
plt.close()
slope, intercept, rValue, pValue, stderr = scipy.stats.linregress(p_act, normPower1)
return kappa, cartDist, rValue
def process_image(name, uCut, lCut, angleInc, radStep, screenDim, dpi, directory, number):
"""
FFT // POWER SPECTRUM // ANGULAR DISTRIBUTION
SIMPLE FFT
:param name:
:param uCut:
:param lCut:
:param angleInc:
:param radStep:
:param screenDim:
:param dpi:
:param directory:
:param number:
:return:
"""
dir = directory + "/"
start_time = time.time()
figWidth = 4.5
figHeigth = 4.5
im = scipy.ndimage.imread(fname=str(name))
m, n = im.shape
# Remove a row and column if the dimension of the image is odd
if (m % 2 == 1):
im = np.delete(im, (0), axis=0)
if (n % 2 == 1):
im = np.delete(im, (0), axis=1)
# Plot Upper left - Original Image
originalImage = plt.figure(frameon=False, figsize=(figWidth, figHeigth))
# Makes it so the image fits entire dedicated space.
ax = plt.Axes(originalImage, [0., 0., 1., 1.])
ax.set_axis_off()
originalImage.add_axes(ax)
plt.imshow(im, cmap='gray', aspect='auto')
plt.axis('off')
originalImage.savefig(dir + 'orgImg_' + number.__str__())
plt.close()
fft_result = np.fft.fft2(im)
Fshift = np.fft.fftshift(fft_result)
Pabs = np.abs(Fshift) ** 2
    # shift in terms of the image because the power spectrum is the mirror of the lines,
    # so mirroring back in terms of the image gives the right alignment
PabsFlip1 = np.rot90(Pabs)
PabsFlip = np.flipud(PabsFlip1)
PabsFlip = np.delete(PabsFlip, (0), axis=0)
PabsFlip = np.delete(PabsFlip, (0), axis=1)
# Plot Upper Right - Power Spectrum on logrithmic scale
logScale = plt.figure(frameon=False, figsize=(figWidth, figHeigth))
# Makes it so the image fits entire dedicated space.
ax = plt.Axes(logScale, [0., 0., 1., 1.])
ax.set_axis_off()
logScale.add_axes(ax)
plt.axis('off')
plt.imshow(log(PabsFlip), cmap='gray', aspect='auto')
logScale.savefig(dir + 'logScl_' + number.__str__())
plt.close()
M, N1 = im.shape
normPower, theta1RadFinal = process_histogram(PabsFlip, N1, uCut, lCut, angleInc, radStep)
# theta and angular distribution are getting retrieved.
t_final, angDist = process_ellipse(normPower, theta1RadFinal, figWidth, figHeigth, dir, number)
# k and cartesian distrubution are getting retrieved.
k, cartDist, rValue = process_kappa(t_final, theta1RadFinal, normPower, figWidth, figHeigth, dir, number)
# Rounding results for Title of Figure
krnd = math.ceil(k * 1000) / 1000
thrnd = math.ceil(t_final * 1000) / 1000
krnd = math.trunc(krnd * 100) / 100
thrnd = math.trunc(thrnd * 100) / 100
a = 32.02
b= -12.43
c = 47.06
d = -0.9185
e = 19.43
f = -0.07693
x = k[0]
sig = math.exp(b*x) + c*math.exp(d*x) + e*exp(f*x)
end_time = time.time()
return sig, k[0], t_final, rValue**2, angDist, cartDist, logScale, originalImage, figWidth, figHeigth, (end_time-start_time)
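# Illustrative usage sketch (not part of the original module; the file name, output
# directory and parameter values below are hypothetical):
#
#     (sig, k, theta, r2, angDist, cartDist, logScale, origImg,
#      figW, figH, elapsed) = process_image('fibers.png', uCut=2, lCut=32,
#                                           angleInc=1, radStep=0.5,
#                                           screenDim=None, dpi=100,
#                                           directory='/tmp/fiberfit', number=0)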
def pol2cart(theta, radius):
xx = radius * np.cos(theta)
yy = radius * np.sin(theta)
return (xx, yy)
def orientation(A):
    if (abs(A[1]) < 1e-15):  # near-zero cross term: ellipse is axis-aligned
if (A[0] <= A[2]):
# Ellipse is horizontal
angle = 0;
major = sqrt(1 / A[0])
minor = sqrt(1 / A[2])
else:
angle = np.pi / 2;
major = sqrt(1 / A[2])
minor = sqrt(1 / A[0])
else:
R = ((A[2] - A[0]) / A[1])
tg = R - sqrt((R * R) + 1)
angle = math.atan(tg)
P = (2 * tg) / (1 + (tg * tg))
if ((A[0] > 0 and A[1] > 0 and A[2] > 0)):
if (angle < (-pi / 4)):
angle = angle + np.pi
else:
angle = angle
elif ((A[1] / P <= (-A[1] / P))):
if (angle < 0):
angle = angle + np.pi / 2
else:
angle = angle - np.pi / 2
elif (A[0] < 0 and A[1] < 0 and A[2] < 0):
if (angle < 0):
angle = angle + np.pi
else:
angle = angle - np.pi
else:
# Switch
if (angle < 0):
angle = angle + np.pi / 2
else:
angle = angle - np.pi / 2
t_New = angle * 180 / np.pi
return (t_New)
| lgpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/testing/decorators.py | 6 | 16219 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import gc
import inspect
import os
import sys
import shutil
import warnings
import unittest
# Note - don't import nose up here - import it only as needed in functions. This
# allows other functions here to be used by pytest-based testing suites without
# requiring nose to be installed.
import numpy as np
import matplotlib as mpl
import matplotlib.style
import matplotlib.units
import matplotlib.testing
from matplotlib import cbook
from matplotlib import ticker
from matplotlib import pyplot as plt
from matplotlib import ft2font
from matplotlib import rcParams
from matplotlib.testing.noseclasses import KnownFailureTest, \
KnownFailureDidNotFailTest, ImageComparisonFailure
from matplotlib.testing.compare import comparable_formats, compare_images, \
make_test_filename
def knownfailureif(fail_condition, msg=None, known_exception_class=None ):
"""
    Assume a test will fail if *fail_condition* is True. *fail_condition*
may also be False or the string 'indeterminate'.
*msg* is the error message displayed for the test.
If *known_exception_class* is not None, the failure is only known
if the exception is an instance of this class. (Default = None)
"""
# based on numpy.testing.dec.knownfailureif
if msg is None:
msg = 'Test known to fail'
def known_fail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
def failer(*args, **kwargs):
try:
# Always run the test (to generate images).
result = f(*args, **kwargs)
except Exception as err:
if fail_condition:
if known_exception_class is not None:
if not isinstance(err,known_exception_class):
# This is not the expected exception
raise
                    # (Keep the next ultra-long comment so it shows in console.)
raise KnownFailureTest(msg) # An error here when running nose means that you don't have the matplotlib.testing.noseclasses:KnownFailure plugin in use.
else:
raise
if fail_condition and fail_condition != 'indeterminate':
raise KnownFailureDidNotFailTest(msg)
return result
return nose.tools.make_decorator(f)(failer)
return known_fail_decorator
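# Illustrative usage sketch (not part of the original module; the test name and
# failure condition below are hypothetical):
#
#     @knownfailureif(sys.platform == 'win32', msg='known to fail on Windows')
#     def test_windows_specific_rendering():
#         ...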
def _do_cleanup(original_units_registry, original_settings):
plt.close('all')
gc.collect()
mpl.rcParams.clear()
mpl.rcParams.update(original_settings)
matplotlib.units.registry.clear()
matplotlib.units.registry.update(original_units_registry)
warnings.resetwarnings() # reset any warning filters set in tests
class CleanupTest(object):
@classmethod
def setup_class(cls):
cls.original_units_registry = matplotlib.units.registry.copy()
cls.original_settings = mpl.rcParams.copy()
matplotlib.testing.setup()
@classmethod
def teardown_class(cls):
_do_cleanup(cls.original_units_registry,
cls.original_settings)
def test(self):
self._func()
class CleanupTestCase(unittest.TestCase):
'''A wrapper for unittest.TestCase that includes cleanup operations'''
@classmethod
def setUpClass(cls):
import matplotlib.units
cls.original_units_registry = matplotlib.units.registry.copy()
cls.original_settings = mpl.rcParams.copy()
@classmethod
def tearDownClass(cls):
_do_cleanup(cls.original_units_registry,
cls.original_settings)
def cleanup(style=None):
"""
A decorator to ensure that any global state is reset before
running a test.
Parameters
----------
style : str, optional
The name of the style to apply.
"""
# If cleanup is used without arguments, `style` will be a
# callable, and we pass it directly to the wrapper generator. If
    # cleanup is called with an argument, it is a string naming a
# style, and the function will be passed as an argument to what we
# return. This is a confusing, but somewhat standard, pattern for
# writing a decorator with optional arguments.
def make_cleanup(func):
if inspect.isgeneratorfunction(func):
@functools.wraps(func)
def wrapped_callable(*args, **kwargs):
original_units_registry = matplotlib.units.registry.copy()
original_settings = mpl.rcParams.copy()
matplotlib.style.use(style)
try:
for yielded in func(*args, **kwargs):
yield yielded
finally:
_do_cleanup(original_units_registry,
original_settings)
else:
@functools.wraps(func)
def wrapped_callable(*args, **kwargs):
original_units_registry = matplotlib.units.registry.copy()
original_settings = mpl.rcParams.copy()
matplotlib.style.use(style)
try:
func(*args, **kwargs)
finally:
_do_cleanup(original_units_registry,
original_settings)
return wrapped_callable
if isinstance(style, six.string_types):
return make_cleanup
else:
result = make_cleanup(style)
style = 'classic'
return result
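# Illustrative usage sketch (not part of the original module; the test bodies below
# are hypothetical).  `cleanup` works both bare and with a style name:
#
#     @cleanup
#     def test_state_is_reset():
#         plt.plot([1, 2, 3])
#
#     @cleanup(style='ggplot')
#     def test_state_is_reset_with_style():
#         plt.plot([1, 2, 3])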
def check_freetype_version(ver):
if ver is None:
return True
from distutils import version
if isinstance(ver, six.string_types):
ver = (ver, ver)
ver = [version.StrictVersion(x) for x in ver]
found = version.StrictVersion(ft2font.__freetype_version__)
return found >= ver[0] and found <= ver[1]
class ImageComparisonTest(CleanupTest):
@classmethod
def setup_class(cls):
CleanupTest.setup_class()
try:
matplotlib.style.use(cls._style)
matplotlib.testing.set_font_settings_for_testing()
cls._func()
except:
# Restore original settings before raising errors during the update.
CleanupTest.teardown_class()
raise
@classmethod
def teardown_class(cls):
CleanupTest.teardown_class()
@staticmethod
def remove_text(figure):
figure.suptitle("")
for ax in figure.get_axes():
ax.set_title("")
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
try:
ax.zaxis.set_major_formatter(ticker.NullFormatter())
ax.zaxis.set_minor_formatter(ticker.NullFormatter())
except AttributeError:
pass
def test(self):
baseline_dir, result_dir = _image_directories(self._func)
for fignum, baseline in zip(plt.get_fignums(), self._baseline_images):
for extension in self._extensions:
will_fail = not extension in comparable_formats()
if will_fail:
fail_msg = 'Cannot compare %s files on this system' % extension
else:
fail_msg = 'No failure expected'
orig_expected_fname = os.path.join(baseline_dir, baseline) + '.' + extension
if extension == 'eps' and not os.path.exists(orig_expected_fname):
orig_expected_fname = os.path.join(baseline_dir, baseline) + '.pdf'
expected_fname = make_test_filename(os.path.join(
result_dir, os.path.basename(orig_expected_fname)), 'expected')
actual_fname = os.path.join(result_dir, baseline) + '.' + extension
if os.path.exists(orig_expected_fname):
shutil.copyfile(orig_expected_fname, expected_fname)
else:
will_fail = True
fail_msg = 'Do not have baseline image %s' % expected_fname
@knownfailureif(
will_fail, fail_msg,
known_exception_class=ImageComparisonFailure)
def do_test(fignum, actual_fname, expected_fname):
figure = plt.figure(fignum)
if self._remove_text:
self.remove_text(figure)
figure.savefig(actual_fname, **self._savefig_kwarg)
err = compare_images(expected_fname, actual_fname,
self._tol, in_decorator=True)
try:
if not os.path.exists(expected_fname):
raise ImageComparisonFailure(
'image does not exist: %s' % expected_fname)
if err:
raise ImageComparisonFailure(
'images not close: %(actual)s vs. %(expected)s '
'(RMS %(rms).3f)'%err)
except ImageComparisonFailure:
if not check_freetype_version(self._freetype_version):
raise KnownFailureTest(
"Mismatched version of freetype. Test requires '%s', you have '%s'" %
(self._freetype_version, ft2font.__freetype_version__))
raise
yield do_test, fignum, actual_fname, expected_fname
def image_comparison(baseline_images=None, extensions=None, tol=0,
freetype_version=None, remove_text=False,
savefig_kwarg=None, style='classic'):
"""
Compare images generated by the test with those specified in
*baseline_images*, which must correspond else an
ImageComparisonFailure exception will be raised.
Keyword arguments:
*baseline_images*: list
A list of strings specifying the names of the images generated
by calls to :meth:`matplotlib.figure.savefig`.
*extensions*: [ None | list ]
If *None*, default to all supported extensions.
Otherwise, a list of extensions to test. For example ['png','pdf'].
*tol*: (default 0)
The RMS threshold above which the test is considered failed.
*freetype_version*: str or tuple
The expected freetype version or range of versions for this
test to pass.
*remove_text*: bool
Remove the title and tick text from the figure before
comparison. This does not remove other, more deliberate,
text, such as legends and annotations.
*savefig_kwarg*: dict
Optional arguments that are passed to the savefig method.
*style*: string
Optional name for the base style to apply to the image
test. The test itself can also apply additional styles
if desired. Defaults to the 'classic' style.
"""
if baseline_images is None:
raise ValueError('baseline_images must be specified')
if extensions is None:
# default extensions to test
extensions = ['png', 'pdf', 'svg']
if savefig_kwarg is None:
#default no kwargs to savefig
savefig_kwarg = dict()
def compare_images_decorator(func):
# We want to run the setup function (the actual test function
# that generates the figure objects) only once for each type
# of output file. The only way to achieve this with nose
# appears to be to create a test class with "setup_class" and
# "teardown_class" methods. Creating a class instance doesn't
# work, so we use type() to actually create a class and fill
# it with the appropriate methods.
name = func.__name__
# For nose 1.0, we need to rename the test function to
# something without the word "test", or it will be run as
# well, outside of the context of our image comparison test
# generator.
func = staticmethod(func)
func.__get__(1).__name__ = str('_private')
new_class = type(
name,
(ImageComparisonTest,),
{'_func': func,
'_baseline_images': baseline_images,
'_extensions': extensions,
'_tol': tol,
'_freetype_version': freetype_version,
'_remove_text': remove_text,
'_savefig_kwarg': savefig_kwarg,
'_style': style})
return new_class
return compare_images_decorator
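# Illustrative usage sketch (not part of the original module; the baseline image name
# and figure content below are hypothetical).  The decorated function draws figures
# that are saved and compared against the named baseline images:
#
#     @image_comparison(baseline_images=['sine_wave'], extensions=['png'],
#                       remove_text=True)
#     def test_sine_wave():
#         fig, ax = plt.subplots()
#         x = np.linspace(0, 2 * np.pi, 100)
#         ax.plot(x, np.sin(x))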
def _image_directories(func):
"""
Compute the baseline and result image directories for testing *func*.
Create the result directory if it doesn't exist.
"""
module_name = func.__module__
if module_name == '__main__':
# FIXME: this won't work for nested packages in matplotlib.tests
warnings.warn('test module run as script. guessing baseline image locations')
script_name = sys.argv[0]
basedir = os.path.abspath(os.path.dirname(script_name))
subdir = os.path.splitext(os.path.split(script_name)[1])[0]
else:
mods = module_name.split('.')
if len(mods) >= 3:
mods.pop(0)
# mods[0] will be the name of the package being tested (in
# most cases "matplotlib") However if this is a
# namespace package pip installed and run via the nose
# multiprocess plugin or as a specific test this may be
# missing. See https://github.com/matplotlib/matplotlib/issues/3314
if mods.pop(0) != 'tests':
warnings.warn(("Module '%s' does not live in a parent module "
"named 'tests'. This is probably ok, but we may not be able "
"to guess the correct subdirectory containing the baseline "
"images. If things go wrong please make sure that there is "
"a parent directory named 'tests' and that it contains a "
"__init__.py file (can be empty).") % module_name)
subdir = os.path.join(*mods)
import imp
def find_dotted_module(module_name, path=None):
"""A version of imp which can handle dots in the module name.
As for imp.find_module(), the return value is a 3-element
tuple (file, pathname, description)."""
res = None
for sub_mod in module_name.split('.'):
try:
res = file, path, _ = imp.find_module(sub_mod, path)
path = [path]
if file is not None:
file.close()
except ImportError:
# assume namespace package
path = list(sys.modules[sub_mod].__path__)
res = None, path, None
return res
mod_file = find_dotted_module(func.__module__)[1]
basedir = os.path.dirname(mod_file)
baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
result_dir = os.path.abspath(os.path.join('result_images', subdir))
if not os.path.exists(result_dir):
cbook.mkdirs(result_dir)
return baseline_dir, result_dir
def switch_backend(backend):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
def switch_backend_decorator(func):
def backend_switcher(*args, **kwargs):
try:
prev_backend = mpl.get_backend()
matplotlib.testing.setup()
plt.switch_backend(backend)
result = func(*args, **kwargs)
finally:
plt.switch_backend(prev_backend)
return result
return nose.tools.make_decorator(func)(backend_switcher)
return switch_backend_decorator
| gpl-3.0 |
wallarelvo/golddigger | golddigger/learningFork.py | 1 | 2924 |
# from sklearn import tree
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.decomposition import PCA
from sklearn import linear_model
from progressbar import ProgressBar
import tableFork
class GroupLearner(object):
def __init__(self, num_groups, data_table):
self.num_groups = num_groups
self.dt = data_table
def learn(self, classifier, **kwargs):
self.models = list()
self.weights = list()
self.probs = list()
self.pca = PCA().fit(self.dt.get_all_training_inputs())
h_inputs = self.dt.get_holdout_inputs()
h_outputs = self.dt.get_holdout_outputs()
progress = ProgressBar()
for i in progress(xrange(self.num_groups)):
inputs = self.dt.get_training_inputs(i)
outputs = self.dt.get_training_outputs(i)
clf = classifier(**kwargs)
clf.fit(inputs, outputs)
preds = clf.predict(h_inputs)
self.weights.append(roc_auc_score(h_outputs, preds))
self.models.append(clf)
self.probs = map(lambda w: w / sum(self.weights), self.weights)
return self
def predict(self, inputs):
print "Predicting..."
progress = ProgressBar()
inputs = inputs
ret_sum = 0.0
for i, model in progress(enumerate(self.models)):
ret_sum += self.probs[i] * model.predict_proba(inputs)[:, 1]
return ret_sum
def compute_auc(self):
h_inputs = self.dt.get_unseen_inputs()
h_outputs = self.dt.get_unseen_outputs()
pred_out = self.predict(h_inputs)
fpr, tpr, thresholds = roc_curve(h_outputs, pred_out)
print fpr
print tpr
print thresholds
return roc_auc_score(h_outputs, pred_out)
def generate_kaggle_file(self, kaggle_file):
self.compute_auc()
print "Generating Kaggle file..."
progress = ProgressBar()
test_inputs = self.dt.get_test_inputs()
ids = self.dt.get_test_ids()
preds = self.predict(test_inputs)
with open(kaggle_file, "w") as f:
f.write("id,repeatProbability\n")
for buyer_id, pred in progress(zip(ids, preds)):
f.write("{},{}\n".format(buyer_id, pred))
return self
class GroupLearnerClassifier(GroupLearner):
def predict(self, inputs):
inputs = inputs
ret_sum = 0.0
for i, model in enumerate(self.models):
ret_sum += model.predict(inputs)
return ret_sum / len(self.models)
if __name__ == "__main__":
num_groups = 500
kaggle_file = "output_cat_final.csv"
print "Loading data..."
dt = tableFork.DataTable().load(num_groups)
print "Learning..."
df = GroupLearnerClassifier(num_groups, dt)
df.learn(linear_model.RidgeClassifierCV,
class_weight={1: 2, 0: 1})\
.generate_kaggle_file(kaggle_file)
| apache-2.0 |
yyjiang/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
LiquidGalaxyLAB/FlOYBD | DataMining/weather/weatherGraphs.py | 2 | 2794 | import matplotlib
import os
import time
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import shutil
from cassandra.cluster import Cluster
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext, SparkSession
def initEnvironment():
global sc, sql, cluster, session
conf = SparkConf()
#conf.setMaster("spark://192.168.246.236:7077")
conf.setMaster("local[*]")
conf.setAppName("Spark Graphs Generation")
conf.set("spark.cassandra.connection.host", "192.168.246.236")
conf.set("spark.executor.memory", "10g")
conf.set("spark.num.executors", "1")
sc = SparkContext(conf=conf)
sql = SQLContext(sc)
cluster = Cluster(['192.168.246.236'])
session = cluster.connect("dev")
def loadData():
global stations, clean_daily
stations = sql.read.format("org.apache.spark.sql.cassandra").load(keyspace="dev", table="station")
clean_daily = sql.read.format("org.apache.spark.sql.cassandra").load(keyspace="dev",
table="clean_daily_measurement")
def createDir(dirName):
if not os.path.exists(dirName):
os.makedirs(dirName)
def generateGraphs():
start_time = time.time()
basePath = "/home/ubuntu/GSOC17/FlOYBD/Flask/graphs/"
stationsPd = stations.toPandas()
columnsList = ["max_temp", "med_temp", "min_temp", "max_pressure", "min_pressure", "precip", "insolation"]
stationCount = 1
for index, row in stationsPd.iterrows():
print(str(stationCount) + ":\t" + row.station_id)
stationpath = basePath + row.station_id
createDir(stationpath)
station_data = clean_daily[clean_daily.station_id == row.station_id]
dataframe = station_data.sort("measure_date", ascending=True).toPandas()
for column in columnsList:
dataframe[column] = dataframe[column].apply(pd.to_numeric)
numregs = dataframe[column].count()
plot = dataframe.plot(y=column, x=dataframe['measure_date'], figsize=(20, 15))
n = int(0.034*numregs)
#ticks = plot.xaxis.get_ticklocs()
#ticklabels = [l.get_text() for l in plot.xaxis.get_ticklabels()]
#plot.xaxis.set_ticks(ticks[::n])
#plot.xaxis.set_ticklabels(ticklabels[::n])
fig = plot.get_figure()
fig.savefig(stationpath + "/" + row.station_id + "_" + column + ".png")
plt.close(fig)
stationCount += 1
print("--- %s seconds ---" % (time.time() - start_time))
if __name__ == "__main__":
# matplotlib.style.use('ggplot')
initEnvironment()
loadData()
shutil.rmtree("/home/ubuntu/GSOC17/FlOYBD/Flask/graphs/")
generateGraphs()
| mit |
Collumbus/ann_xor | SIARP.py | 1 | 18419 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Collumbus'
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import TanhLayer, SigmoidLayer, LinearLayer
from pybrain.datasets import SupervisedDataSet
from sklearn.metrics import r2_score
import numpy as np
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import Tkinter as tk
from Tkinter import *
from PIL import ImageTk, Image
import os
matplotlib.use('TkAgg')
#Window root configs
root = tk.Tk()
root.title("RNA para previsão de valores da porta XOR")
root.geometry("1260x1030+400+100")
#### Just to work on Mac
#img = ImageTk.PhotoImage(Image.open('pybrain_logoe.gif'))
#panel = tk.Label(root, image = img)
#panel.grid(row=0, column=0, columnspan=2, rowspan=4, sticky=W+E+N+S, padx=5,
# pady=5)
img1 = ImageTk.PhotoImage(Image.open('pybrain_logoe.gif'))
panel1 = tk.Label(root, image = img1)
panel1.grid(row=0, column=0, columnspan=2, rowspan=4, sticky=W+E+N+S, padx=5,
pady=5)
#Set up misc. widgets
# Settings Label
Label(root, text="Configurações", font=('Verdana','13','bold'), width=60,
bg='#135823', fg='grey' ).grid(row=0,columnspan=2)
# Epochs
Label(root, text="Épocas", font=('Verdana','11','bold')).grid(row=1)
var1 = IntVar()
e1 = Entry( root, text = var1)
e1.grid(row=2)
var1.set(1000)
# Max Error
Label(root, text="Erro Máximo", font=('Verdana','11','bold')).grid(row=3)
var6 = DoubleVar()
e7 = Entry( root, text = var6)
var6.set(0.0001)
e7.grid(row=4)
# Momentum
Label(root, text="Momento", font=('Verdana','11','bold')).grid(row=1,column=1)
var2 = DoubleVar()
e2 = Scale( root, variable = var2, from_=0.0, to=1.0, resolution=0.01,
orient=HORIZONTAL)
e2.grid(row=2,column=1)
e2.set(0.90)
# Learning Rate
Label(root, text="Aprendizagem", font=('Verdana','11','bold')).grid(row=3,
column=1)
var3 = DoubleVar()
e3 = Scale( root, variable = var3, from_=0.001, to=0.9, resolution=0.001,
orient=HORIZONTAL)
e3.grid(row=4, column=1)
e3.set(0.01)
# Activation Bias
Label(root, text="Ativar Bias", font=('Verdana','11','bold')).grid(row=3,
column=0, columnspan=2)
e4 = IntVar(value=True)
chk = Checkbutton(root, variable=e4,onvalue=True, offvalue=False).grid(row=4,
column=0, columnspan=2)
# Activation Function
Label(root, text="Função de Ativação", font=('Verdana','11','bold')).grid(row=5,
column=0, columnspan=2)
e5 = StringVar()
e5.set("TanhLayer") # default value
var4 = OptionMenu(root, e5, 'TanhLayer', 'SigmoidLayer').grid(row=6, column=0,
columnspan=2,)
var4 = StringVar()
#Setting Weights
Label(root, text="Pesos", font=('Verdana','11','bold')).grid(row=5,
column=0)
e6 = StringVar()
e6.set("Padrão") # default value
var5 = OptionMenu(root, e6, 'Padrão', '(-1,0)', '(-1,1)', '(0,1)', '(-0.1,0.1)'
).grid(row=6, column=0)
var5 = DoubleVar()
# Questions Label
Label(root, text="Questionário", font=('Verdana','13','bold'), width=60,
bg='#135823', fg='grey' ).grid(row=0, column=3, columnspan=2)
#Label(root, height=20, width=2, bg='#135823', fg='grey' ).grid(row=0, column=1,rowspan=9, sticky=E)
#Label(root, height=20, width=2, bg='#135823', fg='grey' ).grid(row=0, column=0,rowspan=9, sticky=W)
# Question 1
Label(root, text="1) É uma cirurgia de emergência?", font=('Verdana','11','bold')).grid(row=1,
column=3,sticky=W)
q1 = StringVar()
q1.set("Não") # default value
qvar1 = OptionMenu(root, q1, 'Sim', 'Não').grid(row=2, column=3,sticky=W)
qvar1 = StringVar()
# Question 2
Label(root, text="2) Há condições cardíacas ativas?", font=('Verdana','11','bold')).grid(row=3,
column=3,sticky=W)
q2 = StringVar()
q2.set("Não") # default value
qvar2 = OptionMenu(root, q2, 'Sim', 'Não').grid(row=4, column=3,sticky=W)
qvar2 = StringVar()
# Question 3
Label(root, text="3) O risco cirúrgico é baixo?", font=('Verdana','11','bold')).grid(row=5,
column=3,sticky=W)
q3 = StringVar()
q3.set("Não") # default value
qvar3 = OptionMenu(root, q3, 'Sim', 'Não').grid(row=6, column=3,sticky=W)
qvar3 = StringVar()
img2 = ImageTk.PhotoImage(Image.open('risc.gif'))
panel2 = tk.Label(root, image = img2)
panel2.grid(row=7, column=3, columnspan=3, rowspan=8, sticky=W+E+N+S, padx=5,
pady=5)
# Question 4
Label(root, text="4) A capacidade funcional é maior ou igual a 4 MET's e sem sintomas?",
font=('Verdana','11','bold')).grid(row=15, column=3,sticky=W)
q4 = StringVar()
q4.set("Não") # default value
qvar4 = OptionMenu(root, q4, 'Sim', 'Não').grid(row=16, column=3,sticky=W)
qvar4 = StringVar()
# Question 5
Label(root, text="5)Existem quantos fatores clínicos de risco?",
font=('Verdana','11','bold')).grid(row=17, column=3,sticky=W)
q5 = StringVar()
q5.set("Nenhum") # default value
qvar5 = OptionMenu(root, q5, 'Nenhum', '1 ou 2', '3 ou mais').grid(row=18, column=3,sticky=W)
qvar5 = StringVar()
img3 = ImageTk.PhotoImage(Image.open('risc2.gif'))
panel3 = tk.Label(root, image = img3)
panel3.grid(row=19, column=3, columnspan=3, rowspan=6, sticky=W+E+N, padx=5,
pady=5)
def runi():
print net.activate([0, 0, 0, 0, 0])[0]
rrt = IntVar(value=True)
rrf = IntVar(value=False)
# Train Button
submit = Button(root, bg='#98FB98', activebackground='#FF7F50', text="Treinar", width=13, command=lambda: all(int(e1.get()),
e2.get(),e3.get(),e4.get(),e5.get(),e6.get(),float(e7.get()),
q1.get(),q2.get(),q3.get(),q4.get(),q5.get(),rrt.get())).grid(row=7, column=0,
pady=4)
#Run button
Button (root, text="Rodar", bg='#98FB98', activebackground='#FF7F50', command=lambda: all(int(e1.get()),
e2.get(),e3.get(),e4.get(),e5.get(),e6.get(),float(e7.get()),
q1.get(),q2.get(),q3.get(),q4.get(),q5.get(), rrf.get())).grid(row=7, column=1,
pady=4)
# Results Label
Label(root, text="Resultados", font=('Verdana','13','bold'), width=60,
bg='#135823', fg='grey' ).grid(row=8,columnspan=2)
#Show total of epochs
Label(root,text='Numero de épocas percorridas: ',fg = 'red', font=('Verdana',
'11',"bold")).grid(row=9,columnspan=2)
epo = IntVar()
epoc1 = Label(textvariable=epo, font=('Verdana','11',"bold")).grid(row=10,
columnspan=2)
#Show error
Label(root, text='Erro final:',fg = 'red', font=('Verdana','11',"bold")).grid(
row=11,columnspan=2)
er = StringVar()
Label(root,textvariable=er, font=('Verdana','11',"bold")).grid(row=12,
columnspan=2)
#Show out esp
Label(root, text='\nA classificação indicada ao paciente é:',fg = 'blue',
font=('Verdana','11',"bold")).grid(row=13,columnspan=2)
m1 = StringVar()
Label(root, textvariable=m1, font=('Verdana','13',"bold")).grid(row=14,
columnspan=2)
#Variables to make plots
it = DoubleVar()
err = DoubleVar()
sc = DoubleVar()
ds = DoubleVar()
dp = DoubleVar()
def all(e1, e2=0.0, e3=0.0, e4=True, e5="TanhLayer", e6='Padrão', e7=0.0001, q1='Não', q2='Não', q3='Não', q4='Não', q5='Não', rr=True):
def rerun(epocas, e2, e3, e4, e5, e6, e7, q1, q2, q3, q4, q5):
#Making the net
#The first 3 parameters are the nember of layers: In-Hidden-Out
global net
net = buildNetwork(5, 4, 1, bias=e4, hiddenclass=eval(e5))
p1 = net.params
ps = net.params.shape
#Setting Weights
if e6 == '(-1,0)':
net._setParameters(np.random.uniform(-1.0,0.0,net.params.shape[0]))
elif e6 == '(-1,1)':
net._setParameters(np.random.uniform(-1.0,1.0,net.params.shape[0]))
elif e6 == '(0,1)':
net._setParameters(np.random.uniform(0.0,1.0,net.params.shape[0]))
elif e6 == '(-0.1,0.1)':
net._setParameters(np.random.uniform(-0.1,0.1,net.params.shape[0]))
################# Instantiating the weights correctly to show ##########
w_instance = []
w_instance = net.params.tolist()
#Creating training data
global ds
ds = SupervisedDataSet(5, 1)
ds.addSample([0, 0, 0, 0, 0], [0])
ds.addSample([0, 0, 0, 0, 1], [1])
ds.addSample([0, 0, 0, 0, 2], [2])
ds.addSample([0, 0, 0, 1, 0], [2])
ds.addSample([0, 0, 0, 1, 1], [0])
ds.addSample([0, 0, 0, 1, 2], [0])
ds.addSample([0, 0, 1, 0, 0], [0])
ds.addSample([0, 0, 1, 0, 1], [0])
ds.addSample([0, 0, 1, 0, 2], [0])
ds.addSample([0, 0, 1, 1, 0], [0])
ds.addSample([0, 0, 1, 1, 1], [0])
ds.addSample([0, 0, 1, 1, 2], [0])
ds.addSample([0, 1, 0, 0, 0], [0])
ds.addSample([0, 1, 0, 0, 1], [0])
ds.addSample([0, 1, 0, 0, 2], [0])
ds.addSample([0, 1, 0, 1, 0], [0])
ds.addSample([0, 1, 0, 1, 1], [0])
ds.addSample([0, 1, 0, 1, 2], [0])
ds.addSample([0, 1, 1, 0, 0], [0])
ds.addSample([0, 1, 1, 0, 1], [0])
ds.addSample([0, 1, 1, 0, 2], [0])
ds.addSample([0, 1, 1, 1, 0], [0])
ds.addSample([0, 1, 1, 1, 1], [0])
ds.addSample([0, 1, 1, 1, 2], [0])
ds.addSample([1, 0, 0, 0, 0], [0])
ds.addSample([1, 0, 0, 0, 1], [0])
ds.addSample([1, 0, 0, 0, 2], [0])
ds.addSample([1, 0, 0, 1, 0], [0])
ds.addSample([1, 0, 0, 1, 1], [0])
ds.addSample([1, 0, 0, 1, 2], [0])
ds.addSample([1, 0, 1, 0, 0], [0])
ds.addSample([1, 0, 1, 0, 1], [0])
ds.addSample([1, 0, 1, 0, 2], [0])
ds.addSample([1, 0, 1, 1, 0], [0])
ds.addSample([1, 0, 1, 1, 1], [0])
ds.addSample([1, 0, 1, 1, 2], [0])
ds.addSample([1, 1, 0, 0, 0], [0])
ds.addSample([1, 1, 0, 0, 1], [0])
ds.addSample([1, 1, 0, 0, 2], [0])
ds.addSample([1, 1, 0, 1, 0], [0])
ds.addSample([1, 1, 0, 1, 1], [0])
ds.addSample([1, 1, 0, 1, 2], [0])
ds.addSample([1, 1, 1, 0, 0], [0])
ds.addSample([1, 1, 1, 0, 1], [0])
ds.addSample([1, 1, 1, 0, 2], [0])
ds.addSample([1, 1, 1, 1, 0], [0])
ds.addSample([1, 1, 1, 1, 1], [0])
#Creating backdropTrainer
trainer = BackpropTrainer(net, ds, learningrate=e3, momentum=e2)
#max_error = 1
error = 0.00001
epocasPercorridas = 0
#Training compared by error or epochs
global it
global err
global sc
err = []
it = []
sc = []
score = 0
while epocas > 0:
y_true = [0, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]
y_pred = [net.activate([0, 0, 0, 0, 0])[0],
net.activate([0, 0, 0, 0, 1])[0],
net.activate([0, 0, 0, 0, 2])[0],
net.activate([0, 0, 0, 1, 0])[0],
net.activate([0, 0, 0, 1, 1])[0],
net.activate([0, 0, 0, 1, 2])[0],
net.activate([0, 0, 1, 0, 0])[0],
net.activate([0, 0, 1, 0, 1])[0],
net.activate([0, 0, 1, 0, 2])[0],
net.activate([0, 0, 1, 1, 0])[0],
net.activate([0, 0, 1, 1, 1])[0],
net.activate([0, 0, 1, 1, 2])[0],
net.activate([0, 1, 0, 0, 0])[0],
net.activate([0, 1, 0, 0, 1])[0],
net.activate([0, 1, 0, 0, 2])[0],
net.activate([0, 1, 0, 1, 0])[0],
net.activate([0, 1, 0, 1, 1])[0],
net.activate([0, 1, 0, 1, 2])[0],
net.activate([0, 1, 1, 0, 0])[0],
net.activate([0, 1, 1, 0, 1])[0],
net.activate([0, 1, 1, 0, 2])[0],
net.activate([0, 1, 1, 1, 0])[0],
net.activate([0, 1, 1, 1, 1])[0],
net.activate([0, 1, 1, 1, 2])[0],
net.activate([1, 0, 0, 0, 0])[0],
net.activate([1, 0, 0, 0, 1])[0],
net.activate([1, 0, 0, 0, 2])[0],
net.activate([1, 0, 0, 1, 0])[0],
net.activate([1, 0, 0, 1, 1])[0],
net.activate([1, 0, 0, 1, 2])[0],
net.activate([1, 0, 1, 0, 0])[0],
net.activate([1, 0, 1, 0, 1])[0],
net.activate([1, 0, 1, 0, 2])[0],
net.activate([1, 0, 1, 1, 0])[0],
net.activate([1, 0, 1, 1, 1])[0],
net.activate([1, 0, 1, 1, 2])[0],
net.activate([1, 1, 0, 0, 0])[0],
net.activate([1, 1, 0, 0, 1])[0],
net.activate([1, 1, 0, 0, 2])[0],
net.activate([1, 1, 0, 1, 0])[0],
net.activate([1, 1, 0, 1, 1])[0],
net.activate([1, 1, 0, 1, 2])[0],
net.activate([1, 1, 1, 0, 0])[0],
net.activate([1, 1, 1, 0, 1])[0],
net.activate([1, 1, 1, 0, 2])[0],
net.activate([1, 1, 1, 1, 0])[0],
net.activate([1, 1, 1, 1, 1])[0]]
score = r2_score(y_true, y_pred)
error = trainer.train()
epocas = epocas - 1
epocasPercorridas = epocasPercorridas + 1
sc.append(score)
err.append(error)
it.append(epocasPercorridas)
if error < e7:
break
#Show total of epochs
global epo
epo.set(epocasPercorridas)
#Show error
global er
#er.set("%f "%(error))
er.set(error)
#Specialist input
esp = np.array([q1,q2,q3,q4])
esp = np.where(esp == 'Sim', 1,0)
if q5 == 'Nenhum':
esp =np.append(esp,0)
elif q5 == '1 ou 2':
esp = np.append(esp,1)
elif q5 == '3 ou mais':
esp = np.append(esp,2)
global pred_esp
pred_esp = net.activate(esp)[0]
#Show out esp
global m1
if -0.2 < pred_esp < 0.2:
m1.set('Classe I')
elif 0.8 < pred_esp < 1.2:
m1.set('Classe IIb')
elif 1.8 < pred_esp < 2.2:
m1.set('Classe IIa')
global dp
dp = np.array([net.activate([0, 0, 0, 0, 0])[0],
net.activate([0, 0, 0, 0, 1])[0],
net.activate([0, 0, 0, 0, 2])[0],
net.activate([0, 0, 0, 1, 0])[0],
net.activate([0, 0, 1, 0, 0])[0],
net.activate([0, 1, 0, 0, 0])[0],
net.activate([1, 0, 0, 0, 0])[0]])
root.update_idletasks()
debug = True
if debug:
print '\n######################################### DEBUG ###########################################\n'
print "\n\nPesos finais: ", net.params
print "\nErro final: ", error
print "\n\nTotal de epocas percorridas: ", epocasPercorridas
print '\n\nSIARP_net 0: Esperado = 0, Calculado = ', net.activate([0, 0, 0, 0, 0])[0]
print 'SIARP_net 1: Esperado = 1, Calculado =', net.activate([0, 0, 0, 0, 1])[0]
print 'SIARP_net 2: Esperado = 2, Calculado =', net.activate([0, 0, 0, 0, 2])[0]
print 'SIARP_net 3: Esperado = 2, Calculado =', net.activate([0, 0, 0, 1, 0])[0]
print 'SIARP_net 4: Esperado = 0, Calculado =', net.activate([0, 0, 1, 0, 0])[0]
print 'SIARP_net 5: Esperado = 0, Calculado =', net.activate([0, 1, 0, 0, 0])[0]
print 'SIARP_net 6: Esperado = 0, Calculado =', net.activate([1, 0, 0, 0, 0])[0]
print net['bias']
print 'O DP é:', dp
print "Pesos iniciais: ", p1
print"Novos pesos:", np.array(w_instance)
print"Score:", score
print 'e6 =', e6
print 'e5 =', e5
print 'pred_esp:', pred_esp
if rr:
rerun(e1, e2, e3, e4, e5, e6, e7, q1, q2, q3, q4, q5)
#Specialist input
esp = np.array([q1,q2,q3,q4])
esp = np.where(esp == 'Sim', 1,0)
if q5 == 'Nenhum':
esp =np.append(esp,0)
elif q5 == '1 ou 2':
esp = np.append(esp,1)
elif q5 == '3 ou mais':
esp = np.append(esp,2)
global pred_esp
pred_esp = net.activate(esp)[0]
#Show out esp
global m1
if -0.2 < pred_esp < 0.2:
m1.set('\nClasse I:\nBenefício >>>risco, cirurgia indicada')
elif 0.8 < pred_esp < 1.2:
m1.set('\nClasse IIb:\nBenefício >>risco, cirurgia provavelmente indicada')
elif 1.8 < pred_esp < 2.2:
m1.set('\nClasse IIa:\nBenefício > ou igual, risco indicação cirúrgica \npode ser considerada')
print 'pred_esp:', pred_esp
root.mainloop()
#Create an empty plot grid
#Learning
fig1 = Figure(figsize=(6.3,5.15))
canvas1 = FigureCanvasTkAgg(fig1, master=root)
canvas1.get_tk_widget().grid(row=20,column=0,columnspan=2)
canvas1.draw()
def plot_error ():
fig1 = Figure(figsize=(6.3,5.15))
a = fig1.add_subplot(111)
a.plot(it, err,color='blue', linewidth=2)
a.set_title('Curva de erro', fontsize=16)
a.set_xlabel('Epoca', fontsize=14)
a.set_ylabel('Erro', fontsize=14, labelpad=7)#.set_rotation(0)
a.set_yscale('log')
a.grid()
canvas1 = FigureCanvasTkAgg(fig1, master=root)
canvas1.get_tk_widget().grid(row=20,column=0,columnspan=2)
canvas1.draw()
def plot_learn ():
fig1 = Figure(figsize=(6.3,5.15))
b = fig1.add_subplot(111)
b.plot(it, sc,color='red', linewidth=2)
b.set_title('Curva de Aprendizado', fontsize=16)
b.grid()
b.set_yscale('log')
canvas1 = FigureCanvasTkAgg(fig1, master=root)
canvas1.get_tk_widget().grid(row=20,column=0,columnspan=2)
canvas1.draw()
#Plot error button
Button (root, text="Erro", command=plot_error).grid(row=18, column=0, pady=4)
#Plot learning button
Button (root, text="Aprendizado", command=plot_learn).grid(row=18, column=1,
pady=4)
Label(root, text=u"\u00a9 Collumbus.2017", font=('Verdana','9'),
foreground="#5c5555").grid(row=21,columnspan=2)
mainloop()
| mit |
jwpeterson/libmesh | doc/statistics/libmesh_pagehits.py | 2 | 11534 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# Hits/month, pages, and gigabytes served.
# To get the Google analytics data:
# .) Go to analytics.google.com.
# .) There should be (as of July 2017) a "Google Analytics Home" box at the top left of the dashboard.
# .) Click the "Audience Overview" link at the bottom right corner of this box.
# .) Adjust date range to previous month.
# .) Record the number of "Pageviews" in the "Hits" column below.
# The data below are from the libmesh.github.io site, which uses the
# number UA-24978333-1.
#
# Note: we do not have control over the analytics for the
# https://www.github.com/libMesh/libmesh page. If you look at the page
# source, analytics code UA-3769691-2 appears, but if I try to add
# this property in my analytics account, Google assigns me the number
# UA-24978333-{2,3,...} (where the last digit may change depending on
# how many times you tried to add/remove this property in the
# Analytics Dashboard) and there does not seem to be a straightforward
# way of inserting this code into the source. There have been some
# README.md based hacks for doing this in the past, but I don't think
# they are particularly reliable...
# Hits, pages, GB served
data = [
# 'Jan 2003', 616, 616, 0
# 'Feb 2003', 2078, 2078, 0,
# 'Mar 2003', 3157, 3157, 0,
# 'Apr 2003', 7800, 7800, 0,
# 'May 2003', 4627, 4627, 0,
# 'Jun 2003', 6156, 6156, 0,
# 'Jul 2003', 6389, 6389, 0,
# 'Aug 2003', 10136, 10136, 0,
# 'Sep 2003', 8871, 8871, 0,
# 'Oct 2003', 9703, 9703, 0,
# 'Nov 2003', 9802, 9802, 0,
# 'Dec 2003', 9123, 9123, 0,
# 'Jan 2004', 13599, 13599, 0,
# 'Feb 2004', 11018, 11018, 0,
# 'Mar 2004', 11713, 11713, 0,
# 'Apr 2004', 14995, 14995, 0,
# 'May 2004', 11285, 11285, 0,
# 'Jun 2004', 12974, 12974, 0,
# 'Jul 2004', 12939, 12939, 0,
# 'Aug 2004', 9708, 9708, 0,
# 'Sep 2004', 7994, 7994, 0,
# 'Oct 2004', 6920, 6920, 0,
# 'Nov 2004', 10261, 10261, 0,
# 'Dec 2004', 7483, 7483, 0,
# 'Jan 2005', 3184, 3184, 0,
# 'Feb 2005', 37733, 14077, .4373,
# 'Mar 2005', 43927, 16408, .5637,
# 'Apr 2005', 29792, 8518, .2890,
# 'May 2005', 51288, 17629, .5689,
# 'Jun 2005', 40617, 16599, .5379,
# 'Jul 2005', 29944, 10006, .3363,
# 'Aug 2005', 39592, 14556, .4577,
# 'Sep 2005', 57638, 14666, .4881,
# 'Oct 2005', 48336, 17976, .5749,
# 'Nov 2005', 49563, 15308, .5810,
# 'Dec 2005', 90863, 40736, .9415,
# 'Jan 2006', 46723, 13487, .5662,
# 'Feb 2006', 62285, 26567, .8229,
# 'Mar 2006', 47446, 14711, .6534,
# 'Apr 2006', 90314, 29635, .9762,
# 'May 2006', 68209, 20998, .7949,
# 'Jun 2006', 50495, 17128, .6881,
# 'Jul 2006', 42387, 10958, .6016,
# 'Aug 2006', 55658, 11793, .6174,
# 'Sep 2006', 54919, 20591, .9056,
# 'Oct 2006', 52916, 17944, .9015,
# 'Nov 2006', 55382, 19833, .9439,
# 'Dec 2006', 54265, 22688, .9162,
# 'Jan 2007', 53813, 19881, 1.0 ,
# 'Feb 2007', 52434, 17920, .9472,
# 'Mar 2007', 61530, 21172, 1.2,
# 'Apr 2007', 125578, 77539, 1.3,
# 'May 2007', 182764, 129596, 1.6,
# 'Jun 2007', 115730, 38571, 1.7,
# 'Jul 2007', 121054, 42757, 1.8,
# 'Aug 2007', 81192, 28187, 1.3,
# 'Sep 2007', 143553, 39734, 2.3,
# 'Oct 2007', 110449, 42111, 2.4,
# 'Nov 2007', 128307, 57851, 2.3,
# 'Dec 2007', 80584, 42631, 2.0,
# 'Jan 2008', 69623, 34155, 2.0,
# 'Feb 2008', 144881, 111751, 2.5,
# 'Mar 2008', 69801, 29211, 1.9,
# 'Apr 2008', 74023, 31149, 2.0,
# 'May 2008', 63123, 23277, 1.8,
# 'Jun 2008', 66055, 25418, 2.1,
# 'Jul 2008', 60046, 22082, 2.0,
# 'Aug 2008', 60206, 24543, 2.0,
# 'Sep 2008', 53057, 18635, 1.6,
# 'Oct 2008', 64828, 27042, 2.1,
# 'Nov 2008', 72406, 29767, 2.3,
# 'Dec 2008', 76248, 31690, 2.3,
# 'Jan 2009', 73002, 29744, 2.0,
# 'Feb 2009', 70801, 29156, 2.1,
# 'Mar 2009', 78200, 31139, 2.1,
# 'Apr 2009', 70888, 26182, 1.7,
# 'May 2009', 67263, 26210, 1.8,
# 'Jun 2009', 73146, 31328, 2.6,
# 'Jul 2009', 77828, 33711, 2.4,
# 'Aug 2009', 64378, 28542, 1.9,
# 'Sep 2009', 76167, 33484, 2.2,
# 'Oct 2009', 95727, 41062, 2.8,
# 'Nov 2009', 88042, 38869, 2.5,
# 'Dec 2009', 76148, 37609, 2.3,
# 'Jan 2010', 268856, 45983, 3.2,
# 'Feb 2010', 208210, 42680, 3.0,
# 'Mar 2010', 116263, 42660, 2.6,
# 'Apr 2010', 102493, 32942, 2.4,
# 'May 2010', 117023, 37107, 2.5,
# 'Jun 2010', 128589, 38019, 2.5,
# 'Jul 2010', 87183, 34026, 2.2,
# 'Aug 2010', 99161, 33199, 2.5,
# 'Sep 2010', 81657, 32305, 2.5,
# 'Oct 2010', 98236, 42091, 3.4,
# 'Nov 2010', 115603, 48695, 3.4,
# 'Dec 2010', 105030, 45570, 3.4,
# 'Jan 2011', 133476, 43549, 3.1,
# 'Feb 2011', 34483, 15002, 1.1,
# 'Mar 2011', 0, 0, 0.0,
# 'Apr 2011', 0, 0, 0.0,
# 'May 2011', 0, 0, 0.0,
# 'Jun 2011', 0, 0, 0.0,
# 'Jul 2011', 0, 0, 0.0,
'Aug 2011', 10185, 0, 0.0, # New "Pageviews" data from google analytics, does not seem comparable to sf.net pagehits data
'Sep 2011', 10305, 0, 0.0,
'Oct 2011', 14081, 0, 0.0,
'Nov 2011', 13397, 0, 0.0,
'Dec 2011', 13729, 0, 0.0,
'Jan 2012', 11050, 0, 0.0,
'Feb 2012', 12779, 0, 0.0,
'Mar 2012', 12970, 0, 0.0,
'Apr 2012', 13051, 0, 0.0,
'May 2012', 11857, 0, 0.0,
'Jun 2012', 12584, 0, 0.0,
'Jul 2012', 12995, 0, 0.0,
'Aug 2012', 13204, 0, 0.0,
'Sep 2012', 13170, 0, 0.0,
'Oct 2012', 13335, 0, 0.0,
'Nov 2012', 11337, 0, 0.0,
'Dec 2012', 10108, 0, 0.0, # libmesh switched to github on December 10, 2012
'Jan 2013', 13029, 0, 0.0,
'Feb 2013', 10420, 0, 0.0,
'Mar 2013', 13400, 0, 0.0,
'Apr 2013', 14416, 0, 0.0,
'May 2013', 13875, 0, 0.0,
'Jun 2013', 13747, 0, 0.0,
'Jul 2013', 14019, 0, 0.0,
'Aug 2013', 10828, 0, 0.0,
'Sep 2013', 9969, 0, 0.0,
'Oct 2013', 13083, 0, 0.0,
'Nov 2013', 12938, 0, 0.0,
'Dec 2013', 9079, 0, 0.0,
'Jan 2014', 9736, 0, 0.0,
'Feb 2014', 11824, 0, 0.0,
'Mar 2014', 10861, 0, 0.0,
'Apr 2014', 12711, 0, 0.0,
'May 2014', 11177, 0, 0.0,
'Jun 2014', 10738, 0, 0.0,
'Jul 2014', 10349, 0, 0.0,
'Aug 2014', 8877, 0, 0.0,
'Sep 2014', 9226, 0, 0.0,
'Oct 2014', 8052, 0, 0.0, # Google analytics number moved over to libmesh.github.io in Oct 2014
'Nov 2014', 9243, 0, 0.0,
'Dec 2014', 10714, 0, 0.0,
'Jan 2015', 11508, 0, 0.0,
'Feb 2015', 11278, 0, 0.0,
'Mar 2015', 13305, 0, 0.0,
'Apr 2015', 12347, 0, 0.0,
'May 2015', 11368, 0, 0.0,
'Jun 2015', 11203, 0, 0.0,
'Jul 2015', 10419, 0, 0.0,
'Aug 2015', 11282, 0, 0.0,
'Sep 2015', 13535, 0, 0.0,
'Oct 2015', 12912, 0, 0.0,
'Nov 2015', 13894, 0, 0.0,
'Dec 2015', 11694, 0, 0.0,
'Jan 2016', 11837, 0, 0.0,
'Feb 2016', 14102, 0, 0.0,
'Mar 2016', 13212, 0, 0.0,
'Apr 2016', 13355, 0, 0.0,
'May 2016', 12486, 0, 0.0,
'Jun 2016', 13973, 0, 0.0,
'Jul 2016', 10688, 0, 0.0,
'Aug 2016', 10048, 0, 0.0,
'Sep 2016', 10847, 0, 0.0,
'Oct 2016', 10984, 0, 0.0,
'Nov 2016', 12233, 0, 0.0,
'Dec 2016', 11430, 0, 0.0,
'Jan 2017', 10327, 0, 0.0,
'Feb 2017', 11039, 0, 0.0,
'Mar 2017', 12986, 0, 0.0,
'Apr 2017', 9773, 0, 0.0,
'May 2017', 10880, 0, 0.0,
'Jun 2017', 9179, 0, 0.0,
'Jul 2017', 8344, 0, 0.0,
'Aug 2017', 8617, 0, 0.0,
'Sep 2017', 8576, 0, 0.0,
'Oct 2017', 11255, 0, 0.0,
'Nov 2017', 10362, 0, 0.0,
'Dec 2017', 7948, 0, 0.0,
'Jan 2018', 9376, 0, 0.0,
'Feb 2018', 8864, 0, 0.0,
'Mar 2018', 10339, 0, 0.0,
'Apr 2018', 10958, 0, 0.0,
'May 2018', 10151, 0, 0.0,
'Jun 2018', 8981, 0, 0.0,
'Jul 2018', 8619, 0, 0.0,
'Aug 2018', 9226, 0, 0.0,
'Sep 2018', 8507, 0, 0.0,
'Oct 2018', 9150, 0, 0.0,
'Nov 2018', 8135, 0, 0.0,
'Dec 2018', 7522, 0, 0.0,
'Jan 2019', 8643, 0, 0.0,
'Feb 2019', 8729, 0, 0.0,
'Mar 2019', 7916, 0, 0.0,
'Apr 2019', 9256, 0, 0.0,
'May 2019', 8841, 0, 0.0,
'Jun 2019', 7443, 0, 0.0,
'Jul 2019', 7718, 0, 0.0,
'Aug 2019', 6167, 0, 0.0,
'Sep 2019', 7215, 0, 0.0,
'Oct 2019', 8026, 0, 0.0,
'Nov 2019', 6529, 0, 0.0,
'Dec 2019', 6468, 0, 0.0,
'Jan 2020', 6545, 0, 0.0,
'Feb 2020', 6577, 0, 0.0,
'Mar 2020', 5658, 0, 0.0,
'Apr 2020', 6349, 0, 0.0,
'May 2020', 7004, 0, 0.0,
'Jun 2020', 7590, 0, 0.0,
'Jul 2020', 6093, 0, 0.0,
'Aug 2020', 6113, 0, 0.0,
'Sep 2020', 5608, 0, 0.0,
'Oct 2020', 6978, 0, 0.0,
'Nov 2020', 7045, 0, 0.0,
'Dec 2020', 5522, 0, 0.0,
'Jan 2021', 5426, 0, 0.0,
'Feb 2021', 5320, 0, 0.0,
'Mar 2021', 7747, 0, 0.0,
'Apr 2021', 8797, 0, 0.0,
'May 2021', 7382, 0, 0.0,
'Jun 2021', 6377, 0, 0.0,
]
# Extract number of hits/month
n_hits_month = data[1::4]
# Divide by 1000 for plotting...
n_hits_month = np.divide(n_hits_month, 1000.)
# Extract list of date strings
date_strings = data[0::4]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%b %Y')))
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Plot one marker per month, connected by a line.
# The color comes from sns.color_palette("muted").as_hex(); those are the
# "same basic order of hues as the default matplotlib color cycle but more
# attractive colors."
ax.plot(date_nums, n_hits_month, marker='o', linewidth=2, color=u'#4878cf')
# Create title
fig.suptitle('libmesh.github.io Hits/Month (in Thousands)')
# Set up x-tick locations
ticks_names = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020', '2021']
# Get numerical values for the names
tick_nums = []
for x in ticks_names:
tick_nums.append(date2num(datetime.strptime('Jan ' + x, '%b %Y')))
# Set tick labels and positions
ax.set_xticks(tick_nums)
ax.set_xticklabels(ticks_names)
# Set x limits for the plot
plt.xlim(date_nums[0], date_nums[-1]+30)
# Make x-axis ticks point outward
ax.get_xaxis().set_tick_params(direction='out')
# Save as PDF
plt.savefig('libmesh_pagehits.pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
laurent-george/protolab_sound_recognition | sound_classification/generate_feature_database.py | 1 | 2699 | __author__ = 'lgeorge'
"""
Small script to generate a feature database offline from WAV files.
"""
import glob
import os
import traceback
import pandas as pd
from sound_processing.features_extraction import extract_mfcc_features_one_channel, _flatten_features_dict
from sound_processing.sig_proc import downsample_signal
from sound_processing.io_sound import load_sound
def _generate_8k_dataset_dict(glob_file_pattern='/mnt/protolab_server_8k/fold*/*.wav', nfft=1024, downsampling_freq=None):
"""
:param glob_file_pattern:
:param nfft:
:param downsampling_freq: if set it's used for downsampling
:return:
"""
files = glob.glob(glob_file_pattern)
assert(files!=[])
res = []
for num, f in enumerate(files):
try:
data = {}
data['file_path'], data['file_name'] = os.path.split(f)
signal, fs = load_sound(f)
# using channel_1 only
if downsampling_freq:
signal, fs = downsample_signal(signal, origin_fs=fs, final_fs=downsampling_freq)
data['fs'] = fs
try:
features = extract_mfcc_features_one_channel(signal, nfft=nfft)
data['features'] = features
except Exception as e:
import IPython
IPython.embed()
res.append(data)
except Exception as e:
print("ERROR on %s" % f)
print(traceback.format_exc())
return res
def generate_8k_dataset(glob_file_pattern='/mnt/protolab_server_8k/fold*/*.wav', nfft=1024, downsampling_freq=None):
    dict_with_features = _generate_8k_dataset_dict(glob_file_pattern=glob_file_pattern, nfft=nfft, downsampling_freq=downsampling_freq)
df = pd.DataFrame(dict_with_features)
    df['fold'] = df['file_path'].apply(lambda x: int(os.path.basename(x)[4:]))  # basename is "foldXY", so take the digits after "fold"
df['features'] = df['features'].apply(lambda x : _flatten_features_dict(x))
df['expected_class'] = df['file_name'].apply(lambda x: _add_class_from_filename_8kdataset(x))
return df
def _add_class_from_filename_8kdataset(x):
"""
    :param x: wav file name; the class id is encoded as the second '-'-separated field (UrbanSound8K naming)
:return:
"""
# adding class name to dataframe
class_id_to_name = {0:"air_conditioner",
1:"car_horn",
2:"children_playing",
3:"dog_bark",
4:"drilling",
5:"engine_idling",
6:"gun_shot",
7:"jackhammer",
8:"siren",
9:"street_music"}
return class_id_to_name[int(x.split('-')[1])]
| mit |
richardwolny/sms-tools | lectures/08-Sound-transformations/plots-code/FFT-filtering.py | 21 | 1723 | import math
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
N = 2048
start = int(1.0*fs)
x1 = x[start:start+N]
plt.figure(1, figsize=(12, 9))
plt.subplot(321)
plt.plot(np.arange(N)/float(fs), x1*np.hamming(N), 'b', lw=1.5)
plt.axis([0, N/float(fs), min(x1*np.hamming(N)), max(x1*np.hamming(N))])
plt.title('x (orchestra.wav)')
mX, pX = DFT.dftAnal(x1, np.hamming(N), N)
startBin = int(N*500.0/fs)
nBins = int(N*4000.0/fs)
bandpass = (np.hanning(nBins) * 60.0) - 60
filt = np.zeros(mX.size)-60
filt[startBin:startBin+nBins] = bandpass
mY = mX + filt
plt.subplot(323)
plt.plot(fs*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5, label = 'mX')
plt.plot(fs*np.arange(mX.size)/float(mX.size), filt+max(mX), 'k', lw=1.5, label='filter')
plt.legend(prop={'size':10})
plt.axis([0,fs/4.0,-90,max(mX)+2])
plt.title('mX + filter')
plt.subplot(325)
plt.plot(fs*np.arange(pX.size)/float(pX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pX),8])
plt.title('pX')
y = DFT.dftSynth(mY, pX, N)*sum(np.hamming(N))
mY1, pY = DFT.dftAnal(y, np.hamming(N), N)
plt.subplot(322)
plt.plot(np.arange(N)/float(fs), y, 'b')
plt.axis([0, float(N)/fs, min(y), max(y)])
plt.title('y')
plt.subplot(324)
plt.plot(fs*np.arange(mY1.size)/float(mY1.size), mY1, 'r', lw=1.5)
plt.axis([0,fs/4.0,-90,max(mY1)+2])
plt.title('mY')
plt.subplot(326)
plt.plot(fs*np.arange(pY.size)/float(pY.size), pY, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pY),8])
plt.title('pY')
plt.tight_layout()
plt.savefig('FFT-filtering.png')
plt.show()
| agpl-3.0 |
drakipovic/deep-learning | lab4/vae.py | 1 | 7985 | import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os
from PIL import Image
from utils import tile_raster_images
import math
import matplotlib.pyplot as plt
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
n_samples = mnist.train.num_examples
learning_rate = 0.001
batch_size = 100
n_hidden_recog_1=200 # encoder layer 1
n_hidden_recog_2=200 # encoder layer 2
n_hidden_gener_1=200 # decoder layer 1
n_hidden_gener_2=200 # decoder layer 2
n_z=20 # number of latent variables
n_input=784 # MNIST data input (img shape: 28*28)
def draw_reconstructions(ins, outs, states, shape_in, shape_state, Nh):
"""Vizualizacija ulaza i pripadajucih rekonstrkcija i stanja skrivenog sloja
ins -- ualzni vektori
outs -- rekonstruirani vektori
states -- vektori stanja skrivenog sloja
shape_in -- dimezije ulaznih slika npr. (28,28)
shape_state -- dimezije za 2D prikaz stanja (npr. za 100 stanja (10,10)
"""
plt.figure(figsize=(8, 12*4))
for i in range(20):
plt.subplot(20, 4, 4*i + 1)
plt.imshow(ins[i].reshape(shape_in), vmin=0, vmax=1, interpolation="nearest")
#plt.title("Test input")
plt.subplot(20, 4, 4*i + 2)
plt.imshow(outs[i][0:784].reshape(shape_in), vmin=0, vmax=1, interpolation="nearest")
#plt.title("Reconstruction")
plt.subplot(20, 4, 4*i + 3)
plt.imshow(states[i][0:Nh].reshape(shape_state), vmin=0, vmax=1, interpolation="nearest")
#plt.title("States")
plt.tight_layout()
def weight_variable(shape, name):
"""Kreiranje tezina"""
# http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization
return tf.get_variable(name, shape=shape,
initializer=tf.contrib.layers.xavier_initializer())
def bias_variable(shape):
"""Kreiranje pomaka"""
initial = tf.zeros(shape, dtype=tf.float32)
return tf.Variable(initial)
def variable_summaries(var, name):
"""Prikupljanje podataka za Tensorboard"""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.scalar_summary('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.scalar_summary('stddev/' + name, stddev)
tf.scalar_summary('max/' + name, tf.reduce_max(var))
tf.scalar_summary('min/' + name, tf.reduce_min(var))
tf.histogram_summary(name, var)
def vae_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.softplus):
"""Kreiranje jednog skrivenog sloja"""
# Adding a name scope ensures logical grouping of the layers in the graph.
with tf.name_scope(layer_name):
# This Variable will hold the state of the weights for the layer
with tf.name_scope('weights'):
weights = weight_variable([input_dim, output_dim], layer_name + '/weights')
variable_summaries(weights, layer_name + '/weights')
with tf.name_scope('biases'):
biases = bias_variable([output_dim])
variable_summaries(biases, layer_name + '/biases')
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
tf.histogram_summary(layer_name + '/pre_activations', preactivate)
activations = act(preactivate, name='activation')
tf.histogram_summary(layer_name + '/activations', activations)
return activations
tf.reset_default_graph()
sess = tf.InteractiveSession()
# input tensor definition
x = tf.placeholder(tf.float32, [None, 784])
# TODO define the encoder part
layer_e1 = vae_layer(x, n_input, n_hidden_recog_1, 'layer_e1')
layer_e2 = vae_layer(layer_e1, n_hidden_recog_1, n_hidden_recog_2, 'layer_e2')
with tf.name_scope('z'):
    # define the latent variables and the corresponding noise generator
z_mean = vae_layer(layer_e2, n_hidden_recog_2, n_z, 'z_mean', act=tf.identity)
z_log_sigma_sq = vae_layer(layer_e2, n_hidden_recog_2, n_z, 'z_log_sigma_sq', act=tf.identity)
eps = tf.random_normal((batch_size, n_z), 0, 1, dtype=tf.float32)
z = tf.add(z_mean, tf.mul(tf.sqrt(tf.exp(z_log_sigma_sq)), eps))
tf.histogram_summary('z/activations', z)
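# Note (added): the block above is the reparameterization trick -- rather than
# sampling z ~ N(mu, sigma^2) directly, it samples eps ~ N(0, I) and sets
# z = mu + sigma * eps, which keeps the graph differentiable w.r.t. mu and sigma.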
# define the decoder part
layer_d1 = vae_layer(z, n_z, n_hidden_gener_1, 'layer_d1')
layer_d2 = vae_layer(layer_d1, n_hidden_gener_1, n_hidden_gener_2, 'layer_d2')
# define the mean of the reconstruction
x_reconstr_mean = vae_layer(layer_d2, n_hidden_gener_2, n_input, 'x', act=tf.identity)
x_reconstr_mean_out = tf.nn.sigmoid(x_reconstr_mean)
# define the two components of the cost function
with tf.name_scope('costs'):
    # cost function component - cross-entropy
#cost1 = tf.nn.softmax_cross_entropy_with_logits(x_reconstr_mean, x)
#cost1 = -tf.reduce_sum(x*tf.log(1e-6+x_reconstr_mean)+(1-x)+tf.log(1e-6+1-x_reconstr_mean), axis=1)
cost1 = tf.reduce_sum(-x_reconstr_mean * x + tf.log(1 + tf.exp(x_reconstr_mean)), axis=1)
    # cost function component - KL divergence
cost2 = -0.5 * tf.reduce_sum(1 + z_log_sigma_sq - tf.square(z_mean) - tf.exp(z_log_sigma_sq), axis=1)
#tf.scalar_summary('cost2', cost2)
cost = tf.reduce_mean(cost1) + tf.reduce_mean(cost2) # average over batch
tf.scalar_summary('cost', cost)
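# Note (added): together the two terms above form the negative ELBO of the VAE,
#   L = E_q[-log p(x|z)] + KL(q(z|x) || N(0, I)),
# where cost1 is the Bernoulli cross-entropy computed from the logits and cost2
# is the closed-form KL for a diagonal Gaussian:
#   KL = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2)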
# ADAM optimizer
with tf.name_scope('train'):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Prikupljanje podataka za Tensorboard
merged = tf.merge_all_summaries()
train_writer = tf.train.SummaryWriter('train', sess.graph)
init = tf.initialize_all_variables()
sess.run(init)
saver = tf.train.Saver()
n_epochs = 5
for epoch in range(n_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
for i in range(total_batch):
batch_xs, _ = mnist.train.next_batch(batch_size)
opt, cos = sess.run((optimizer, cost), feed_dict={x: batch_xs})
avg_cost += cos / n_samples * batch_size
if epoch % 2 == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(avg_cost))
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, _ = sess.run([merged, optimizer], feed_dict={x: batch_xs},
options=run_options, run_metadata=run_metadata)
train_writer.add_run_metadata(run_metadata, 'epoch%03d' % epoch)
train_writer.add_summary(summary, i)
saver.save(sess, os.path.join('train', "model.ckpt"), epoch)
train_writer.close()
# visualize the reconstructions and hidden states
x_sample = mnist.test.next_batch(100)[0]
x_reconstruct, z_out = sess.run([x_reconstr_mean_out, z], feed_dict={x: x_sample})
draw_reconstructions(x_sample, x_reconstruct, z_out, (28, 28), (4,5), 20)
# Visualize the layout of the test samples in the 2D latent-variable space - 1st way
x_sample, y_sample = mnist.test.next_batch(5000)
z_mu = sess.run(z_mean, feed_dict={x: x_sample})
plt.figure(figsize=(8, 6))
plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
plt.colorbar()
# # Visualize the layout of the test samples in the 2D latent-variable space - 2nd way
# nx = ny = 20
# x_values = np.linspace(-3, 3, nx)
# y_values = np.linspace(-3, 3, ny)
# canvas = np.empty((28*ny, 28*nx))
# for i, yi in enumerate(x_values):
# for j, xi in enumerate(y_values):
# z_mu = np.array([[xi, yi]])
# print z_mu.shape
# x_mean = sess.run(x_reconstr_mean_out, feed_dict={z: np.repeat(z_mu,100,0)})
# canvas[(nx-i-1)*28:(nx-i)*28, j*28:(j+1)*28] = x_mean[0].reshape(28, 28)
# plt.figure(figsize=(8, 10))
# Xi, Yi = np.meshgrid(x_values, y_values)
# plt.imshow(canvas, origin="upper")
# plt.tight_layout()
plt.show() | mit |
srowen/spark | python/pyspark/pandas/tests/test_dataframe_conversion.py | 15 | 11321 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import string
import tempfile
import unittest
import numpy as np
import pandas as pd
from distutils.version import LooseVersion
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
from pyspark.testing.sqlutils import SQLTestUtils
class DataFrameConversionTest(PandasOnSparkTestCase, SQLTestUtils, TestUtils):
"""Test cases for "small data" conversion and I/O."""
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(prefix=DataFrameConversionTest.__name__)
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
@property
def pdf(self):
return pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 3])
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@staticmethod
def strip_all_whitespace(str):
"""A helper function to remove all whitespace from a string."""
return str.translate({ord(c): None for c in string.whitespace})
def test_to_html(self):
expected = self.strip_all_whitespace(
"""
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;"><th></th><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><th>0</th><td>1</td><td>4</td></tr>
<tr><th>1</th><td>2</td><td>5</td></tr>
<tr><th>3</th><td>3</td><td>6</td></tr>
</tbody>
</table>
"""
)
got = self.strip_all_whitespace(self.psdf.to_html())
self.assert_eq(got, expected)
# with max_rows set
expected = self.strip_all_whitespace(
"""
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;"><th></th><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><th>0</th><td>1</td><td>4</td></tr>
<tr><th>1</th><td>2</td><td>5</td></tr>
</tbody>
</table>
"""
)
got = self.strip_all_whitespace(self.psdf.to_html(max_rows=2))
self.assert_eq(got, expected)
@staticmethod
def get_excel_dfs(pandas_on_spark_location, pandas_location):
return {
"got": pd.read_excel(pandas_on_spark_location, index_col=0),
"expected": pd.read_excel(pandas_location, index_col=0),
}
@unittest.skip("openpyxl")
def test_to_excel(self):
with self.temp_dir() as dirpath:
pandas_location = dirpath + "/" + "output1.xlsx"
pandas_on_spark_location = dirpath + "/" + "output2.xlsx"
pdf = self.pdf
psdf = self.psdf
psdf.to_excel(pandas_on_spark_location)
pdf.to_excel(pandas_location)
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
psdf.a.to_excel(pandas_on_spark_location)
pdf.a.to_excel(pandas_location)
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
pdf = pd.DataFrame({"a": [1, None, 3], "b": ["one", "two", None]}, index=[0, 1, 3])
psdf = ps.from_pandas(pdf)
psdf.to_excel(pandas_on_spark_location, na_rep="null")
pdf.to_excel(pandas_location, na_rep="null")
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
pdf = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]}, index=[0, 1, 3])
psdf = ps.from_pandas(pdf)
psdf.to_excel(pandas_on_spark_location, float_format="%.1f")
pdf.to_excel(pandas_location, float_format="%.1f")
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
psdf.to_excel(pandas_on_spark_location, header=False)
pdf.to_excel(pandas_location, header=False)
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
psdf.to_excel(pandas_on_spark_location, index=False)
pdf.to_excel(pandas_location, index=False)
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
def test_to_json(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_json(orient="records"), pdf.to_json(orient="records"))
def test_to_json_negative(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaises(NotImplementedError):
psdf.to_json(orient="table")
with self.assertRaises(NotImplementedError):
psdf.to_json(lines=False)
def test_read_json_negative(self):
with self.assertRaises(NotImplementedError):
ps.read_json("invalid", lines=False)
def test_to_json_with_path(self):
pdf = pd.DataFrame({"a": [1], "b": ["a"]})
psdf = ps.DataFrame(pdf)
psdf.to_json(self.tmp_dir, num_files=1)
expected = pdf.to_json(orient="records")
output_paths = [path for path in os.listdir(self.tmp_dir) if path.startswith("part-")]
assert len(output_paths) > 0
output_path = "%s/%s" % (self.tmp_dir, output_paths[0])
self.assertEqual("[%s]" % open(output_path).read().strip(), expected)
def test_to_json_with_partition_cols(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
psdf.to_json(self.tmp_dir, partition_cols="b", num_files=1)
partition_paths = [path for path in os.listdir(self.tmp_dir) if path.startswith("b=")]
assert len(partition_paths) > 0
for partition_path in partition_paths:
column, value = partition_path.split("=")
expected = pdf[pdf[column] == value].drop("b", axis=1).to_json(orient="records")
output_paths = [
path
for path in os.listdir("%s/%s" % (self.tmp_dir, partition_path))
if path.startswith("part-")
]
assert len(output_paths) > 0
output_path = "%s/%s/%s" % (self.tmp_dir, partition_path, output_paths[0])
with open(output_path) as f:
self.assertEqual("[%s]" % open(output_path).read().strip(), expected)
@unittest.skip("Pyperclip could not find a copy/paste mechanism for Linux.")
def test_to_clipboard(self):
pdf = self.pdf
psdf = self.psdf
self.assert_eq(psdf.to_clipboard(), pdf.to_clipboard())
self.assert_eq(psdf.to_clipboard(excel=False), pdf.to_clipboard(excel=False))
self.assert_eq(
psdf.to_clipboard(sep=";", index=False), pdf.to_clipboard(sep=";", index=False)
)
def test_to_latex(self):
pdf = self.pdf
psdf = self.psdf
self.assert_eq(psdf.to_latex(), pdf.to_latex())
self.assert_eq(psdf.to_latex(col_space=2), pdf.to_latex(col_space=2))
self.assert_eq(psdf.to_latex(header=True), pdf.to_latex(header=True))
self.assert_eq(psdf.to_latex(index=False), pdf.to_latex(index=False))
self.assert_eq(psdf.to_latex(na_rep="-"), pdf.to_latex(na_rep="-"))
self.assert_eq(psdf.to_latex(float_format="%.1f"), pdf.to_latex(float_format="%.1f"))
self.assert_eq(psdf.to_latex(sparsify=False), pdf.to_latex(sparsify=False))
self.assert_eq(psdf.to_latex(index_names=False), pdf.to_latex(index_names=False))
self.assert_eq(psdf.to_latex(bold_rows=True), pdf.to_latex(bold_rows=True))
self.assert_eq(psdf.to_latex(decimal=","), pdf.to_latex(decimal=","))
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assert_eq(psdf.to_latex(encoding="ascii"), pdf.to_latex(encoding="ascii"))
def test_to_records(self):
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
pdf = pd.DataFrame({"A": [1, 2], "B": [0.5, 0.75]}, index=["a", "b"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_records(), pdf.to_records())
self.assert_eq(psdf.to_records(index=False), pdf.to_records(index=False))
self.assert_eq(psdf.to_records(index_dtypes="<S2"), pdf.to_records(index_dtypes="<S2"))
def test_from_records(self):
# Assert using a dict as input
self.assert_eq(
ps.DataFrame.from_records({"A": [1, 2, 3]}), pd.DataFrame.from_records({"A": [1, 2, 3]})
)
# Assert using a list of tuples as input
self.assert_eq(
ps.DataFrame.from_records([(1, 2), (3, 4)]), pd.DataFrame.from_records([(1, 2), (3, 4)])
)
# Assert using a NumPy array as input
self.assert_eq(ps.DataFrame.from_records(np.eye(3)), pd.DataFrame.from_records(np.eye(3)))
# Asserting using a custom index
self.assert_eq(
ps.DataFrame.from_records([(1, 2), (3, 4)], index=[2, 3]),
pd.DataFrame.from_records([(1, 2), (3, 4)], index=[2, 3]),
)
        # Assert excluding column(s)
self.assert_eq(
ps.DataFrame.from_records({"A": [1, 2, 3], "B": [1, 2, 3]}, exclude=["B"]),
pd.DataFrame.from_records({"A": [1, 2, 3], "B": [1, 2, 3]}, exclude=["B"]),
)
# Assert limiting to certain column(s)
self.assert_eq(
ps.DataFrame.from_records({"A": [1, 2, 3], "B": [1, 2, 3]}, columns=["A"]),
pd.DataFrame.from_records({"A": [1, 2, 3], "B": [1, 2, 3]}, columns=["A"]),
)
# Assert limiting to a number of rows
self.assert_eq(
ps.DataFrame.from_records([(1, 2), (3, 4)], nrows=1),
pd.DataFrame.from_records([(1, 2), (3, 4)], nrows=1),
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_dataframe_conversion import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
waynenilsen/statsmodels | docs/sphinxext/numpy_ext/plot_directive.py | 65 | 20399 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
    Base directory to which plot:: file names are relative.
    (If None or empty, file names are relative to the directory where
    the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback
import sphinx
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
return directives.choice(arg, ('python', 'lisp'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if options.has_key('format'):
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError, err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in options.items()
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = cStringIO.StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec setup.config.plot_pre_code in ns
exec code in ns
except (Exception, SystemExit), err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except exceptions.BaseException, err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
| bsd-3-clause |
awickert/river-network-evolution | run_ThreeChannels_generalizing.py | 1 | 4095 | import numpy as np
from scipy.sparse import spdiags, block_diag
from scipy.sparse.linalg import spsolve, isolve
from matplotlib import pyplot as plt
import copy
import time
import ThreeChannels_generalizing
reload(ThreeChannels_generalizing)
r = ThreeChannels_generalizing.rnet()
self = r
plt.ion()
# PER RIVER #
#############
self.eta = []
self.nx = 4E0 + 1
#######################
### INPUT VARIABLES ###
#######################
# GLOBAL UNIFORM #
##################
self.D = 200E-3 # [m] [uniform so far]
porosity = lambda_p = 0.35 # [-]
n_time_steps = 1
self.flow_from_to = np.array([[0,2],[1,2]])
self.flow_from = [[], [], [0,1]]
self.flow_to = [[2], [2], []]
self.b = [20, 20, 40]
self.segment_Q_in = self.headwaters_segments = np.array([[0,40],[1,20]])
self.nsegments = len(self.b)
#self.flow_from_to = np.array([[0,1]])
#self.flow_from = [[], [0]]
#self.flow_to = [[1], []]
self.flow_from_to = np.array([[0,2],[1,2],[2,4],[3,4]])
self.flow_from = [[], [], [0,1], [], [2,3]]
self.flow_to = [[2], [2], [4], [4], []]
self.b = [20, 20, 40, 20, 60]
#self.b = [20, 30, 50, 10, 60]
self.segment_Q_in = self.headwaters_segments = np.array([[0,40],[1,20],[3,50]])
self.nsegments = len(self.flow_from)
"""
self.flow_from_to = np.array([[]])
self.flow_from = [[]]
self.flow_to = [[]]
self.b = [20]
#self.b = [20, 30, 50, 10, 60]
self.segment_Q_in = self.headwaters_segments = np.array([[0,40]])
self.nsegments = len(self.flow_from)
"""
"""
self.flow_from_to = np.array([[0,1]])
self.flow_from = [[], [0]]
self.flow_to = [[1], []]
#self.b = [20, 20, 40, 20, 60]
self.b = [20, 20]
"""
# 3 rivers -- would often pull them in from GIS
# Keep everything uniform for starters
xmax = 1E3
self.B = 100 * np.ones(self.nx)
S = 1E-2
self.dt = 3.15E2
self.x = []
self.dx = []
self.h = []
self.eta = []
# Multiple rivers
for Si in range(len(self.flow_to)):
self.x.append(np.linspace(0, xmax, self.nx))
self.dx.append(np.mean(np.diff(self.x[-1]))) # Special case of uniform grid spacing
self.h.append(2. * np.ones(self.nx)) # specific case of 2 m depth everywhere
#self.x[-1] += self.x[-2][-1] + self.dx[-1] #Very specific to this 3-river set here
self.x[-3] += self.x[1][-1] + self.dx[-1] #Very specific to this 5-river set here
self.x[-2] += self.x[1][-1] + self.dx[-1] #Very specific to this 5-river set here
self.x[-1] += self.x[2][-1] + self.dx[-1] #Very specific to this 5-river set here
#self.x[-1] += self.x[-2][-1] + self.dx[-1] #Very specific to this 2-river set here
for row in self.x:
self.eta.append( -S * row + np.max(self.x)*S )
self.eta[-1] = np.round(self.eta[-1], 6) # coarse trick to rmv floating point issues
self.eta0 = copy.deepcopy(self.eta)
#########################
### DERIVED VARIABLES ###
#########################
self.nts = np.linspace(0, n_time_steps, n_time_steps+1) # start at 1 below, t0 is initial
self.A0 = []
for Si in range(len(self.x)):
self.A0.append( 11.325 / (1 - lambda_p) * self.h[Si]/self.D )
#q_s_in = 0.69623693 # [m^3 s^{-1}]
# q_s for equilibrium in each channel; used for transport slope upstream
# boundary conditions
#q_s_out = whatever it has to be to transport out as much material as it receives
q_s_equilibrium = np.array(self.sediment__discharge_per_unit_width())
#print np.mean(eta)
# Ignoring for now -- for iterating
# Assuming in order: so flow_from is really irrelevant; flow_to is the important part
"""
fig = plt.figure()
plt.ylim((0,50))
ax = plt.subplot(111)
"""
#for row in self.eta:
# row += 10
for ts in range(1): # self.nts
# 3 iterations is usually good; nothing special about it, though.
self.eta_iter = copy.deepcopy(self.eta) # For iteration
self.stack_vars()
for iter_i in range(1):
self.build_coeff_matrix(q_s_equilibrium)
self.build_RHS()
#print np.max(np.hstack(self.eta_iter))
self.solve()
self.update()
"""
ax.clear()
if ts % 25 == 0:
self.riverplot(linewidth=2)
#plt.ylim((0,40))
#plt.draw()
plt.pause(0.0001)
"""
self.stack_vars()
#self.plot_coeff_matrix()
#plt.ylim((0,40))
self.riverplot(linewidth=4, plot_start=True)
plt.show()
| gpl-3.0 |
wdurhamh/statsmodels | statsmodels/examples/ex_univar_kde.py | 34 | 5127 | """
This example tests the nonparametric estimator
for several popular univariate distributions with the different
bandwidth selection methods - CV-ML; CV-LS; Scott's rule of thumb.
Produces six different plots, one for each distribution:
1) Beta
2) f
3) Pareto
4) Laplace
5) Weibull
6) Poisson
"""
from __future__ import print_function
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
KDEMultivariate = sm.nonparametric.KDEMultivariate
np.random.seed(123456)
# Beta distribution
# Parameters
a = 2
b = 5
nobs = 250
support = np.random.beta(a, b, size=nobs)
rv = stats.beta(a, b)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(1)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Beta Distributed " \
"Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# f distribution
df = 100
dn = 100
nobs = 250
support = np.random.f(dn, df, size=nobs)
rv = stats.f(df, dn)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(2)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of f Distributed " \
"Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Pareto distribution
a = 2
nobs = 150
support = np.random.pareto(a, size=nobs)
rv = stats.pareto(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(3)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Pareto " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Laplace Distribution
mu = 0
s = 1
nobs = 250
support = np.random.laplace(mu, s, size=nobs)
rv = stats.laplace(mu, s)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(4)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Laplace " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Weibull Distribution
a = 1
nobs = 250
support = np.random.weibull(a, size=nobs)
rv = stats.weibull_min(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(5)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Weibull " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Poisson Distribution
a = 2
nobs = 250
support = np.random.poisson(a, size=nobs)
rv = stats.poisson(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='o', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='o', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='o', bw='cv_ml')
plt.figure(6)
plt.plot(support[ix], rv.pmf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Poisson " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
plt.show()
| bsd-3-clause |
cdondrup/strands_qsr_lib | qsr_lib/scripts/basic_qsr_visualiser.py | 8 | 6789 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Graphical Visualisation for QSR Relations
:Author: Peter Lightbody <[email protected]>
:Organization: University of Lincoln
:Date: 12 September 2015
:Version: 0.1
:Status: Development
:Copyright: STRANDS default
"""
import argparse
import matplotlib
import matplotlib.pyplot as plt
import matplotlib as mpl
from random import randint
from pylab import *
import textwrap
import math
from math import sin, cos, radians
from matplotlib.widgets import CheckButtons
import time
from qsrlib.qsrlib import QSRlib, QSRlib_Request_Message
from qsrlib_io.world_trace import Object_State, World_Trace
class qsr_gui():
bb1 = None # (x1, y1, x1+w1, y1+h1)
bb2 = None # (x2, y2, x2+w2, y2+h2)
qsr = list()
qsr_type = ("rcc2", "rcc3", "rcc8", "cardir", "argd")
def __compute_qsr(self, bb1, bb2):
if not self.qsr:
return ""
ax, ay, bx, by = self.bb1
cx, cy, dx, dy = self.bb2
qsrlib = QSRlib()
world = World_Trace()
world.add_object_state_series([Object_State(name="red", timestamp=0, x=((ax+bx)/2.0), y=((ay+by)/2.0), xsize=abs(bx-ax), ysize=abs(by-ay)),
Object_State(name="yellow", timestamp=0, x=((cx+dx)/2.0), y=((cy+dy)/2.0), xsize=abs(dx-cx), ysize=abs(dy-cy))])
dynamic_args = {"argd": {"qsr_relations_and_values": self.distance}}
qsrlib_request_message = QSRlib_Request_Message(which_qsr=self.qsr, input_data=world, dynamic_args=dynamic_args)
qsrlib_response_message = qsrlib.request_qsrs(req_msg=qsrlib_request_message)
for t in qsrlib_response_message.qsrs.get_sorted_timestamps():
foo = ""
for k, v in zip(qsrlib_response_message.qsrs.trace[t].qsrs.keys(),
qsrlib_response_message.qsrs.trace[t].qsrs.values()):
foo += str(k) + ":" + str(v.qsr) + "; \n"
return foo
def randomBB(self):
        x1 = randint(1, 10)
        y1 = randint(1, 10)
        w1 = randint(1, 10)
        h1 = randint(1, 10)
        x2 = randint(1, 10)
        y2 = randint(1, 10)
        w2 = randint(1, 10)
        h2 = randint(1, 10)
self.bb1 = (x1, y1, x1+w1, y1+h1)
self.bb2 = (x2, y2, x2+w2, y2+h2)
def EventClick(self,label):
if label in self.qsr:
self.qsr.remove(label)
else:
self.qsr.append(label)
if not (self.args.placeOne and self.args.placeTwo):
self.randomBB()
self.updateWindow()
def updateWindow(self):
plt.subplot(2, 2, (1, 2)).clear()
plt.subplot(2, 2, 3)
plt.subplot(2, 2, 3).clear()
plt.axis('off')
plt.text(1, 1, (self.__compute_qsr(self.bb1, self.bb2)), family='serif', style='italic', ha='center')
        # Match the rectangle colours to the QSR object names ("red" is bb1, "yellow" is bb2)
        rect1 = matplotlib.patches.Rectangle((self.bb1[0],self.bb1[1]), abs(self.bb1[2]-self.bb1[0]), abs(self.bb1[1]-self.bb1[3]), color='red', alpha=0.5)
        rect2 = matplotlib.patches.Rectangle((self.bb2[0],self.bb2[1]), abs(self.bb2[2]-self.bb2[0]), abs(self.bb2[1]-self.bb2[3]), color='yellow', alpha=0.5)
ax, ay, bx, by = self.bb1
cx, cy, dx, dy = self.bb2
plt.subplot(2, 2, (1, 2)).add_patch(rect1)
plt.subplot(2, 2, (1, 2)).add_patch(rect2)
self.qsr_specific_reference_gui()
        xlim([min(ax, bx, cx, dx)-15, max(ax, bx, cx, dx)+15])
        ylim([min(ay, by, cy, dy)-15, max(ay, by, cy, dy)+15])
draw()
def qsr_specific_reference_gui(self):
ax, ay, bx, by = self.bb1
cx, cy, dx, dy = self.bb2
# Centre of BB1 on the X angle
AcentreX = ((ax+bx)/2.0)
# Centre of BB1 on the Y angle
AcentreY = ((ay+by)/2.0)
# Centre of BB2 on the X angle
BcentreX = ((cx+dx)/2.0)
# Centre of BB2 on the Y angle
BcentreY = ((cy+dy)/2.0)
plt.subplot(2, 2, (1, 2))
if "cardir" in self.qsr:
# Draws a Line between the centre of bb1 and bb2
verts = [(AcentreX, AcentreY), (BcentreX, BcentreY)]
xs, ys = zip(*verts)
plot(xs, ys, 'x--', lw=1, color='black')
# Draws the compass shaped guide around bb1 to help identify regions
for index in range(1,8):
angle = math.pi/8+(index*math.pi/4)
distanceBetweenObjects = math.sqrt(math.pow(dx,2) + math.pow(dy,2))
distance = (16)
verts = [(((distance * math.cos(angle)) + AcentreX),
((distance * math.sin(angle)) + AcentreY)),
(((distance * math.cos(angle+math.pi)) + AcentreX),
((distance * math.sin(angle+math.pi))+ AcentreY))]
xs, ys = zip(*verts)
plot(xs, ys, 'x--', lw=1, color='green')
# Add circles around bb1 to identify distance regions
if "argd" in self.qsr:
for k in self.distance.keys():
plt.subplot(2, 2, (1, 2)).add_patch(plt.Circle((AcentreX,AcentreY),self.distance[k], fill=False, color='green'))
def initWindow(self):
if not (self.args.placeOne and self.args.placeTwo):
self.randomBB()
axes().set_aspect('equal', 'datalim')
plt.subplot(2, 2, (1, 2))
plt.subplot(2, 2, (1, 2)).set_aspect('equal')
subplots_adjust(left=0.31)
subplots_adjust(bottom=-0.7)
plt.title('QSR Visualisation')
axcolor = 'lightgoldenrodyellow'
rax = plt.axes([0.03, 0.4, 0.22, 0.45], axisbg=axcolor)
checkBox = CheckButtons(rax, self.qsr_type,(False,False,False,False,False))
checkBox.on_clicked(self.EventClick)
plt.subplot(2, 2, 3)
plt.axis('off')
plt.text(1, 1, (self.__compute_qsr(self.bb1, self.bb2)), family='serif', style='italic', ha='center')
if self.qsr:
self.updateWindow()
plt.show()
def processArgs(self):
parser = argparse.ArgumentParser()
parser.add_argument("-pOne","--placeOne", help="specify the location of object one", nargs='+', type=int)
parser.add_argument("-pTwo","--placeTwo", help="specify the location of object two", nargs='+', type=int)
parser.add_argument("-argd","--distance", help="specify the distances for argd", nargs='+', type=float)
self.args = parser.parse_args()
self.bb1 = self.args.placeOne
self.bb2 = self.args.placeTwo
self.distance = dict()
if self.args.distance:
for x, d in enumerate(self.args.distance):
self.distance[str(x)] = d
else:
self.distance = {"0": 4., "1": 8., "2": 12., "3":16.}
if __name__ == "__main__":
vis = qsr_gui()
vis.processArgs()
vis.initWindow()
| mit |
lbeltrame/mnegri-ov170 | programs/dump_counts.py | 1 | 4110 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 Luca Beltrame <[email protected]>
#
# This file is part of utils.
#
# utils is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# utils is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with utils. If not, see <http://www.gnu.org/licenses/>.
import argparse
from collections import defaultdict, Counter
import itertools
from gemini import GeminiQuery
import cruzdb
import pandas as pd
# This uses the public UCSC instance. In case MySQL is blocked, you can
# point it using a valid SQLalchemy URL to an existing database server.
ucsc = cruzdb.Genome(db="hg19")
# These work around proper gene association bugs (due to amplicon extending
# past boundaries, or annotation picking things on the wrong strand)
substitutions = {
"ZEB1-AS1": "ZEB1", # Different strand
"RHNO1": "FOXM1", # Different strand
# "PLA2G6": np.nan, # off-target?
"C3orf72": "FOXL2", # Different strand
# "MC1R": np.nan, # past the promoter
"ACAA1": "MYD88", # Different strand
"VIM-AS1": "VIM", # Different strand
"LOC100507424": "FOXM1", # Wrong annotation?
"MTOR-AS1": "MTOR",
"EGFR-AS1": "EGFR",
"WRAP53": "TP53",
"EPM2AIP1": "MLH1",
"C5orf22": "DROSHA",
"C9orf53": "CDKN2A",
"LYRM5": "KRAS",
"N4BP2L1": "BRCA2",
"RMDN3": "RAD51",
"NBR2": "BRCA1",
"CNTD2": "AKT2",
"HSCB": "CHEK2",
"NPAT": "ATM",
"MC1R": "TUBB3"
}
def get_nearby_gene(chrom, start, end):
nearest = ucsc.knearest("refFlat", chrom, start, end)
nearest = pd.Series([item.geneName for item in nearest])
nearest = nearest.apply(lambda x: substitutions[x]
if x in substitutions else x)
try:
nearest = nearest.drop_duplicates().item()
except Exception:
print(nearest.drop_duplicates())
raise
# assert len(nearest) == 1
return nearest
def summarize_by_gene_and_sample(db, coding_only=True):
"This is copied from GEMINI's own burden tool"
query = ("select chrom, start, end, gene, impact, info from variants where"
" impact != 'synonymous_coding' and in_1kg=0 ")
if coding_only:
query += " and codon_change != 'None'"
gq = GeminiQuery(db)
gq.run(query, show_variant_samples=True)
burden = defaultdict(Counter)
for row in gq:
gene_name = row['gene']
if not gene_name:
gene_name = get_nearby_gene(row["chrom"], row["start"],
row["end"])
new_counts = Counter(row["HET_samples"])
        # Counter can't do scalar multiplication, so homozygous ALT samples
        # are added twice (two alternate alleles per sample)
new_counts = new_counts + Counter(row["HOM_ALT_samples"])
new_counts = new_counts + Counter(row["HOM_ALT_samples"])
del new_counts['']
burden[gene_name] += new_counts
dfs = list()
for gene_name, counts in burden.items():
df = pd.DataFrame(counts, columns=counts.keys(),
index=[gene_name])
dfs.append(df)
df = pd.concat(dfs)
df = df.fillna(0)
return df
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--coding-only", action="store_true")
parser.add_argument("source", help="Source GEMINI database")
parser.add_argument("destination", help="destination file to save to")
options = parser.parse_args()
df = summarize_by_gene_and_sample(options.source, options.coding_only)
with open(options.destination, "w") as handle:
df.to_csv(handle, sep="\t", na_rep="NA")
if __name__ == '__main__':
main()
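# Example invocation (illustrative file names; the positional arguments are the
# source GEMINI database and the tab-separated output file, as parsed in main()):
#   python dump_counts.py --coding-only cohort.db gene_burden.tsv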
| gpl-2.0 |
7630155/tushare | test/storing_test.py | 40 | 1729 | # -*- coding:utf-8 -*-
import os
from sqlalchemy import create_engine
from pandas.io.pytables import HDFStore
import tushare as ts
def csv():
df = ts.get_hist_data('000875')
df.to_csv('c:/day/000875.csv',columns=['open','high','low','close'])
def xls():
df = ts.get_hist_data('000875')
    # save directly to an Excel file
df.to_excel('c:/day/000875.xlsx', startrow=2,startcol=5)
def hdf():
df = ts.get_hist_data('000875')
# df.to_hdf('c:/day/store.h5','table')
store = HDFStore('c:/day/store.h5')
store['000875'] = df
store.close()
def json():
df = ts.get_hist_data('000875')
df.to_json('c:/day/000875.json',orient='records')
    # or use the JSON string directly
print(df.to_json(orient='records'))
def appends():
filename = 'c:/day/bigfile.csv'
for code in ['000875', '600848', '000981']:
df = ts.get_hist_data(code)
if os.path.exists(filename):
df.to_csv(filename, mode='a', header=None)
else:
df.to_csv(filename)
def db():
df = ts.get_tick_data('600848',date='2014-12-22')
    engine = create_engine('mysql://root:jimmy1@127.0.0.1/mystock?charset=utf8')
# db = MySQLdb.connect(host='127.0.0.1',user='root',passwd='jimmy1',db="mystock",charset="utf8")
# df.to_sql('TICK_DATA',con=db,flavor='mysql')
# db.close()
df.to_sql('tick_data',engine,if_exists='append')
def nosql():
import pymongo
import json
conn = pymongo.Connection('127.0.0.1', port=27017)
df = ts.get_tick_data('600848',date='2014-12-22')
print(df.to_json(orient='records'))
conn.db.tickdata.insert(json.loads(df.to_json(orient='records')))
# print conn.db.tickdata.find()
if __name__ == '__main__':
nosql() | bsd-3-clause |
wanatpj/h_blind | compute_linear_correlation.py | 1 | 2168 | import os
import numpy
from matplotlib import pyplot
from optparse import OptionParser
from PIL import Image
from common import *
def _parse_flags():
global indir, inreferencefile, referencefile, versuswatermark
parser = OptionParser()
parser.add_option("-i",
"--in",
dest="indir",
                    help="location of the directory that contains images to watermark",
metavar="DIR")
parser.add_option("-v",
"--inreference",
dest="inreference",
help="monochrome image; black pixel denotes 1, white pixel denotes -1",
metavar="FILE")
parser.add_option("-w",
"--reference",
dest="reference",
help="monochrome image; black pixel denotes 1, white pixel denotes -1",
metavar="FILE")
(options, args) = parser.parse_args()
if (options.indir != None and options.inreference != None) \
or (options.indir == None and options.inreference == None):
raise Exception("define: indir xor inreference")
if options.indir != None:
indir = options.indir
versuswatermark = False
elif options.inreference != None:
inreferencefile = options.inreference
versuswatermark = True
referencefile = options.reference
class ComputeLinearCorrelation:
def __init__(self, reference, indir):
self.reference = reference
self.indir = indir
def __call__(self, f):
return linear_correlation(
numpy.array(Image.open(self.indir + "/" + f).convert("L").getdata()),
self.reference)
def main():
global indir, inreferencefile, referencefile, versuswatermark
_parse_flags()
(width, height), reference = get_watermark(referencefile)
if versuswatermark:
(inwidth, inheight), inreference = get_watermark(inreferencefile)
lcs = [linear_correlation(reference, inreference)]
else:
lcs = get_pool().map(
ComputeLinearCorrelation(reference, indir),
filter(ImageSizeFilter((width, height), indir), os.listdir(indir)))
print "Mean: " + str(numpy.mean(lcs))
print "Median: " + str(numpy.median(lcs))
print "Variance: " + str(numpy.var(lcs))
bins = numpy.linspace(-2, 2, 200)
pyplot.hist(lcs, bins, alpha=0.5, label='lc')
pyplot.show()
main()
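# Example invocations (illustrative paths; the flags are defined in _parse_flags
# above, and exactly one of -i/-v must be given):
#   python compute_linear_correlation.py -i watermarked_images/ -w reference.png
#   python compute_linear_correlation.py -v extracted_mark.png -w reference.png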
| gpl-3.0 |
evgchz/scikit-learn | sklearn/metrics/scorer.py | 9 | 12826 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
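# A minimal usage sketch of the objects defined below (illustrative only;
# ``clf``, ``X`` and ``y`` stand for an already fitted classifier and its
# evaluation data):
#
#     scorer = SCORERS['f1']          # or: make_scorer(f1_score)
#     value = scorer(clf, X, y)       # internally calls clf.predict(X)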
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
                y_pred = np.vstack([p for p in y_pred]).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
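# Illustrative sketch of how a scoring argument is resolved by check_scoring
# further below (``est`` stands for any fitted estimator):
#
#     check_scoring(est, scoring='accuracy')   # -> SCORERS['accuracy']
#     check_scoring(est)                       # -> _passthrough_scorer (est.score)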
def check_scoring(estimator, scoring=None, allow_none=False,
score_overrides_loss=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
        raise TypeError("estimator should be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif hasattr(estimator, 'predict') and has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif not has_scoring:
if allow_none:
return None
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
else:
raise TypeError(
"The estimator passed should have a 'score' or a 'predict' "
"method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, f1=f1_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
precision=precision_scorer, recall=recall_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
| bsd-3-clause |
MECTsrl/imx_mect | projects/testing/cgi_trends_compare.py | 1 | 3713 | import pandas as pd
import json
import os
import glob
import csv
import urllib.request as url
import difflib
import sys
import time
import itertools
import threading
import socket
import subprocess
import shutil
from urllib.error import HTTPError, URLError
processDone = False
def loadJsonData (URLvars):
  global processDone
  try:
    jsondata = json.loads(url.urlopen(URLvars,timeout=20).read().decode())
    time.sleep(1)
    return jsondata
  except (HTTPError, URLError):
    print("HTTP Timeout!")
    processDone = True
    exit()
#here is the animation
def animate():
for c in itertools.cycle(['|', '/', '-', '\\']):
if processDone:
sys.stdout.write('\r ')
break
sys.stdout.write('\r ' + c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\r ')
def prepend_line(file_name, line):
""" Insert given string as a new line at the beginning of a file """
# define name of temporary dummy file
dummy_file = file_name + '.bak'
# open original file in read mode and dummy file in write mode
with open(file_name, 'r') as read_obj, open(dummy_file, 'w') as write_obj:
# Write given line to the dummy file
write_obj.write(line + '\n')
# Read lines from original file one by one and append them to the dummy file
for line in read_obj:
write_obj.write(line)
# remove original file
os.remove(file_name)
# Rename dummy file as the original file
os.rename(dummy_file, file_name)
IP= sys.argv[1]
#IP= "192.168.5.123";
URLvars="http://"+IP+"/get_trends.cgi"
jsonTrendPath = "jsontrend"
if os.path.exists(jsonTrendPath):
shutil.rmtree(jsonTrendPath, ignore_errors=True)
os.mkdir(jsonTrendPath)
print("Loading trends from "+ IP + "\n")
t = threading.Thread(target=animate)
t.start()
jsondata = loadJsonData(URLvars)
processDone=True
trendsNameArray = jsondata['query']['trends']
print("\nWriting per-trend CSV files to " + jsonTrendPath + "\n")
df = pd.DataFrame(jsondata['trends'])
df = df['filename']
for index in df.index:
df_track = pd.DataFrame(jsondata['trends'][index]['track'])
df_track['id'] = df_track['id'].str.pad(32, side='right', fillchar=' ')
df_track.to_csv(jsonTrendPath+"/"+df[index],sep=';',header=False,index=False)
prepend_line(jsonTrendPath+"/"+df[index],'L')
print("Downloading original files \n")
trendPath = "customtrend"
if os.path.exists(trendPath):
shutil.rmtree(trendPath, ignore_errors=True)
try:
subprocess.call(["rsync", "-Havx", "root@"+IP+":/local/data/"+trendPath, "./"])
# fRes = subprocess.call(["rsync", "-Havx", "[email protected]:/local/data/"+trendPath, "./"])
except subprocess.CalledProcessError as rsyncRes:
print("error code", rsyncRes.returncode, rsyncRes.output)
differenceFile = "data_difference_trends.diff"
print("Looking for differences \n")
for index in df.index:
with open(trendPath + '/' + df[index], 'r') as t1, open(jsonTrendPath+"/"+df[index], 'r') as t2:
fileone = t1.readlines()
filetwo = t2.readlines()
fileone = [line.replace(' ', '') for line in fileone]
filetwo = [line.replace(' ', '') for line in filetwo]
with open(differenceFile, 'a') as outFile:
for line in difflib.unified_diff(fileone, filetwo, fromfile=trendPath + '/' + df[index], tofile=jsonTrendPath+"/"+df[index]):
outFile.write(line)
if not os.stat(differenceFile).st_size == 0:
print("Done! Differences are loaded in "+ differenceFile)
else:
print("Done! No differences were found!")
#clean
shutil.rmtree(trendPath, ignore_errors=True)
shutil.rmtree(jsonTrendPath, ignore_errors=True)
os.remove(differenceFile)
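# Example invocation (illustrative; the script expects the panel's IP address
# as its only command line argument):
#   python cgi_trends_compare.py 192.168.5.123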
| gpl-2.0 |
kmike/scikit-learn | sklearn/tests/test_random_projection.py | 6 | 13183 | from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import (
johnson_lindenstrauss_min_dim,
gaussian_random_matrix,
sparse_random_matrix,
SparseRandomProjection,
GaussianRandomProjection)
from sklearn.utils.testing import (
assert_less,
assert_raises,
assert_raise_message,
assert_array_equal,
assert_equal,
assert_almost_equal,
assert_in,
assert_array_almost_equal)
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
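# Illustrative use of the helper exercised above (no assertions, just intent):
#   johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.5) returns the minimum
#   number of components that preserves pairwise distances within a factor of
#   (1 +/- eps) for 1000 samples, per the Johnson-Lindenstrauss lemma.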
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
"""Check basic properties of random matrix generation"""
for random_matrix in all_random_matrix:
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
for random_matrix in all_sparse_random_matrix:
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
    """Check some statistical properties of Gaussian random matrix"""
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    """Check some statistical properties of sparse random matrix"""
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
RandomProjection(n_components=n_features + 1).fit(data)
assert_equal(len(w), 1)
assert issubclass(w[-1].category, UserWarning)
| bsd-3-clause |
power-system-simulation-toolbox/psst | psst/plot/plot.py | 2 | 5051 | import operator
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib import cm
import matplotlib.patches as mpatches
import matplotlib.colors as colors
import numpy as np
from ..utils import make_interpolater
from ..case.utils import calculate_segments
cdict = {'red': ((0.0, 0.0, 0.0), # From 0 to 0.25, we fade the red and green channels
(0.25, 0.5, 0.5), # up a little, to make the blue a bit more grey
(0.25, 0.0, 0.0), # From 0.25 to 0.75, we fade red from 0.5 to 1
(0.75, 1.0, 1.0), # to fade from green to yellow
(1.0, 0.5, 0.5)), # From 0.75 to 1.0, we bring the red down from 1
# to 0.5, to go from bright to dark red
'green': ((0.0, 0.0, 0.0), # From 0 to 0.25, we fade the red and green channels
(0.25, 0.6, 0.6), # up a little, to make the blue a bit more grey
(0.25, 1.0, 1.0), # Green is 1 from 0.25 to 0.75 (we add red
(0.75, 1.0, 1.0), # to turn it from green to yellow)
(0.75, 0.0, 0.0), # No green needed in the red upper quarter
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.9, 0.9), # Keep blue at 0.9 from 0 to 0.25, and adjust its
(0.25, 0.9, 0.9), # tone using the green and red channels
(0.25, 0.0, 0.0), # No blue needed above 0.25
(1.0, 0.0, 0.0))
}
cmap = colors.LinearSegmentedColormap('BuGnYlRd',cdict)
def plot_network_with_results(psstc, model, time=0):
G = create_network(psstc)
fig, axs = plt.subplots(1, 1, figsize=(12, 9))
ax = axs
line_color_dict = dict()
hour = 0
for i, b in branch_df.iterrows():
if model.ThermalLimit[i] != 0:
line_color_dict[(b['F_BUS'], b['T_BUS'])] = round(abs(model.LinePower[i, hour].value / model.ThermalLimit[i]), 2)
else:
line_color_dict[(b['F_BUS'], b['T_BUS'])] = 0
gen_color_dict = dict()
hour = 0
for i, g in generator_df.iterrows():
gen_color_dict[(i, g['GEN_BUS'])] = round(abs(model.PowerGenerated[i, hour].value / model.MaximumPowerOutput[i]), 2)
color_dict = line_color_dict.copy()
color_dict.update(gen_color_dict)
edge_color = list()
for e in G.edges():
try:
edge_color.append( color_dict[(e[0], e[1])] )
except KeyError:
edge_color.append( color_dict[(e[1], e[0])] )
ax.axis('off')
pos = graphviz_layout(G, prog='sfdp')
nx.draw_networkx_nodes(G, pos, list(generator_df.index),)
nx.draw_networkx_nodes(G, pos, list(bus_df.index), node_color='black',)
edges = nx.draw_networkx_edges(G, pos, edge_color=edge_color, edge_cmap=cmap, width=3)
nx.draw_networkx_edge_labels(G, pos, edge_labels=color_dict)
divider = make_axes_locatable(ax)
cax = divider.append_axes("left", size="5%", pad=0.05)
cb = plt.colorbar(edges, cax=cax)
cax.yaxis.set_label_position('left')
cax.yaxis.set_ticks_position('left')
# cb.set_label('Voltage (V)')
def plot_stacked_power_generation(results, ax=None, kind='bar', legend=False):
if ax is None:
fig, axs = plt.subplots(1, 1, figsize=(16, 10))
ax = axs
df = results.power_generated
cols = (df - results.unit_commitment*results.maximum_power_output).std().sort_values().index
df = df[[c for c in cols]]
df.plot(kind=kind, stacked=True, ax=ax, colormap=cm.jet, alpha=0.5, legend=legend)
df = results.unit_commitment * results.maximum_power_output
df = df[[c for c in cols]]
df.plot.area(stacked=True, ax=ax, alpha=0.125/2, colormap=cm.jet, legend=None)
ax.set_ylabel('Dispatch and Committed Capacity (MW)')
ax.set_xlabel('Time (h)')
return ax
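# Illustrative usage sketch (``results`` is assumed to expose power_generated,
# unit_commitment and maximum_power_output frames, as used above):
#   ax = plot_stacked_power_generation(results)
#   ax.figure.savefig('dispatch.png')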
def plot_costs(case, number_of_segments=1, ax=None, legend=True):
if ax is None:
fig, axs = plt.subplots(1, 1, figsize=(16, 10))
ax = axs
color_scale = make_interpolater(0, len(case.gen_name), 0, 1)
color = {g: plt.cm.jet(color_scale(i)) for i, g in enumerate(case.gen_name)}
for s in calculate_segments(case, number_of_segments=number_of_segments):
pmin, pmax = s['segment']
x = np.linspace(pmin, pmax)
y = x * s['slope']
ax.plot(x, y, color=color[s['name']])
ax = ax.twinx()
for s in calculate_segments(case, number_of_segments=number_of_segments):
pmin, pmax = s['segment']
x = np.linspace(pmin, pmax)
y = [s['slope'] for _ in x]
ax.plot(x, y, color=color[s['name']])
ax.set_ylim(0, 1.2*y[-1])
if legend:
lines = list()
for g in case.gen_name:
lines.append(mlines.Line2D([], [], color=color[g], label=g))
ax.legend(handles=lines, loc='upper left')
return ax
| mit |
hsaputra/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 9 | 6700 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testFeatureEngineeringFnWithSameName(self):
def input_fn():
return {
"x": constant_op.constant(["9."])
}, {
"y": constant_op.constant(["99."])
}
def feature_engineering_fn(features, labels):
# Github #12205: raise a TypeError if called twice.
_ = string_ops.string_split(features["x"])
features["x"] = constant_op.constant([9.])
labels["y"] = constant_op.constant([99.])
return features, labels
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
polysimtools/pysimm | pysimm/system.py | 1 | 186350 | # ******************************************************************************
# pysimm.system module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2016 Michael E. Fortunato
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import os
import re
import sys
import json
from xml.etree import ElementTree as Et
from random import random
from io import StringIO
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from itertools import permutations
from math import sin, cos, sqrt, pi, acos, floor, ceil
try:
from subprocess import call
except ImportError:
call = None
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
from pysimm import calc
from pysimm import error_print
from pysimm import warning_print
from pysimm import verbose_print
from pysimm import debug_print
from pysimm import PysimmError
from pysimm.calc import rotate_vector
from pysimm.utils import PysimmError, Item, ItemContainer
class Particle(Item):
"""pysimm.system.Particle
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
x: x coordinate
y: y coordinate
z: z coordinate
charge: partial charge
type: :class:`~ParticleType` object reference
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
def coords(self):
return (self.x, self.y, self.z)
def check(self, style='full'):
if style == 'full':
if self.x is None:
error_print('particle %s has no x coordinate' % self.tag)
return False
            if self.y is None:
                error_print('particle %s has no y coordinate' % self.tag)
                return False
            if self.z is None:
                error_print('particle %s has no z coordinate' % self.tag)
                return False
            if self.charge is None:
                error_print('particle %s has no charge' % self.tag)
                return False
            if self.type is None or not self.type.check():
                error_print('particle %s has no type or an invalid type' % self.tag)
                return False
return True
else:
error_print('style %s not supported yet' % style)
return False
def delete_bonding(self, s):
"""pysimm.system.Particle.delete_bonding
Iterates through s.bonds, s.angles, s.dihedrals, and s.impropers and removes
those which contain this :class:`~pysimm.system.Particle`.
Args:
s: :class:`~pysimm.system.System` object from which bonding objects will be removed
Returns:
None
"""
if self.bonds:
for b in self.bonds:
if b in s.bonds:
s.bonds.remove(b.tag)
else:
for b in s.bonds:
if self is b.a or self is b.b:
s.bonds.remove(b.tag)
if self.angles:
for a in self.angles:
if a in s.angles:
s.angles.remove(a.tag)
else:
for a in s.angles:
if self is a.a or self is a.b or self is a.c:
s.angles.remove(a.tag)
if self.dihedrals:
for d in self.dihedrals:
if d in s.dihedrals:
s.dihedrals.remove(d.tag)
else:
for d in s.dihedrals:
if self is d.a or self is d.b or self is d.c or self is d.d:
s.dihedrals.remove(d.tag)
if self.impropers:
for i in self.impropers:
if i in s.impropers:
s.impropers.remove(i.tag)
else:
for i in s.impropers:
if self is i.a or self is i.b or self is i.c or self is i.d:
s.impropers.remove(i.tag)
def translate(self, dx, dy, dz):
"""pysimm.system.Particle.translate
Shifts Particle position by dx, dy, dz.
Args:
dx: distance to shift in x direction
dy: distance to shift in y direction
dz: distance to shift in z direction
Returns:
None
"""
self.x += dx
self.y += dy
self.z += dz
def __sub__(self, other):
"""pysimm.system.Particle.__sub__
Implements subtraction between :class:`~pysimm.system.Particle` objects to calculate distance.
Args:
other: :class:`~pysimm.system.Particle` object
Returns:
distance calculated by :func:`~pysimm.calc.distance`. This does not consider pbc
"""
if isinstance(other, Particle):
return calc.distance(self, other)
else:
return None
def __rsub__(self, other):
        return self.__sub__(other)
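# Illustrative sketch of basic Particle usage (coordinates and charge are
# made-up values; no force field type is needed for a bare distance):
#   p1 = Particle(x=0.0, y=0.0, z=0.0, charge=0.0)
#   p2 = Particle(x=3.0, y=4.0, z=0.0, charge=0.0)
#   p1 - p2                      # -> 5.0 via calc.distance (ignores pbc)
#   p1.translate(1.0, 0.0, 0.0)  # shifts p1.x from 0.0 to 1.0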
class ParticleType(Item):
"""pysimm.system.ParticleType
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
sigma: LJ sigma value (Angstrom)
epsilon: LJ epsilon value (kcal/mol)
elem: element abbreviation, i.e. 'H' for Hydrogen, 'Cl' for Chlorine
name: force field particle type name
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
def form(self, style='lj_12-6', d_range=None):
"""pysimm.system.ParticleType.form
Returns data to plot functional form for the potential energy with
the given style.
Args:
style: string for pair style of ParticleType (lj_12-6, lj_9-6, buck)
Returns:
x, y for plotting functional form (energy vs distance)
"""
if not d_range:
d_range = np.linspace(0.1, 8, 79)
if style == 'lj_12-6':
e = np.array([calc.LJ_12_6(self, d) for d in d_range])
return d_range, e
elif style == 'lj_9-6':
e = np.array([calc.LJ_9_6(self, d) for d in d_range])
return d_range, e
elif style.startswith('buck'):
e = np.array([calc.buckingham(self, d) for d in d_range])
return d_range, e
@classmethod
def guess_style(cls, nparam):
if nparam == 2:
return 'lj'
elif nparam == 3:
return 'buck'
elif nparam == 4:
return 'charmm'
else:
raise PysimmError('Cannot guess pair style')
@classmethod
def parse_lammps(cls, line, style):
tmp = line.split('#')
data = tmp.pop(0).strip().split()
        name = ','.join(re.split(r',|\s+', tmp[0].strip())) if tmp else None
if style == 'mass':
if len(data) != 2:
raise PysimmError('LAMMPS data improperly formatted for mass info')
return cls(tag=int(data[0]), name=name, mass=float(data[1]))
elif style.startswith('lj') or style.startswith('class2'):
if len(data) != 3:
raise PysimmError('LAMMPS data improperly formatted for LJ style')
return cls(
tag=int(data[0]), name=name,
epsilon=float(data[1]), sigma=float(data[2])
)
elif style.startswith('charmm'):
if len(data) == 3:
return cls(
tag=int(data[0]), name=name,
epsilon=float(data[1]), sigma=float(data[2]),
epsilon_14=float(data[1]), sigma_14=float(data[2])
)
elif len(data) == 5:
return cls(
tag=int(data[0]), name=name,
epsilon=float(data[1]), sigma=float(data[2]),
epsilon_14=float(data[3]), sigma_14=float(data[4])
)
else:
raise PysimmError('LAMMPS data improperly formatted for charmm style')
elif style.startswith('buck'):
if len(data) != 4:
raise PysimmError('LAMMPS data improperly formatted for buckingham style')
return cls(
tag=int(data[0]), name=name,
a=float(data[1]), rho=float(data[2]), c=float(data[3])
)
else:
raise PysimmError('LAMMPS pair style {} not supported yet'.format(style))
def write_lammps(self, style='lj'):
"""pysimm.system.ParticleType.write_lammps
Formats a string to define particle type coefficients for a LAMMPS
data file given the provided style.
Args:
style: string for pair style of ParticleType (lj, class2, mass, buck)
Returns:
LAMMPS formatted string with pair coefficients
"""
if style.startswith('lj'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.epsilon, self.sigma, self.name
)
elif style.startswith('charmm'):
if self.epsilon_14 and self.sigma_14:
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.epsilon, self.sigma, self.epsilon_14, self.sigma_14, self.name
)
else:
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.epsilon, self.sigma, self.epsilon, self.sigma, self.name
)
elif style.startswith('class2'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.epsilon, self.sigma, self.name
)
elif style.startswith('mass'):
return '{:4}\t{}\t# {}\n'.format(
self.tag, self.mass, self.name
)
elif style.startswith('buck'):
return '{:4}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.a, self.rho, self.c, self.name
)
else:
raise PysimmError('cannot understand pair style {}'.format(style))
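# Illustrative sketch of writing pair coefficients (the epsilon/sigma values
# below are made up):
#   pt = ParticleType(tag=1, name='c3', epsilon=0.066, sigma=3.5)
#   pt.write_lammps('lj')   # -> "   1\t0.066\t3.5\t# c3\n"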
class Bond(Item):
"""pysimm.system.Bond
Bond between particle a and b
a--b
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
a: :class:`~pysimm.system.Particle` object involved in bond
b: :class:`~pysimm.system.Particle` object involved in bond
type: BondType object reference
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
def get_other_particle(self, p):
if p is not self.a and p is not self.b:
return None
else:
return self.a if p is self.b else self.b
def distance(self):
"""pysimm.system.Bond.distance
Calculates distance between :class:`~pysimm.system.Particle` a and :class:`~pysimm.system.Particle` b in this Bond object.
Sets distance to dist attribute of self. Does not consider pbc.
Args:
None
Returns:
Distance between Particle a and Particle b (not considering pbc)
"""
if isinstance(self.a, Particle) and isinstance(self.b, Particle):
self.dist = calc.distance(self.a, self.b)
return self.dist
else:
return None
class BondType(Item):
"""pysimm.system.BondType
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
k: harmonic bond force constant (kcal/mol/A^2)
r0: bond equilibrium distance (Angstrom)
name: force field bond type name
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
if self.name:
self.rname = ','.join(reversed(self.name.split(',')))
@classmethod
def guess_style(cls, nparam):
if nparam == 2:
return 'harmonic'
elif nparam == 4:
return 'class2'
else:
raise PysimmError('Cannot guess bond style')
@classmethod
def parse_lammps(cls, line, style):
tmp = line.split('#')
data = tmp.pop(0).strip().split()
        name = ','.join(re.split(r',|\s+', tmp[0].strip())) if tmp else None
if style.startswith('harm'):
if len(data) != 3:
raise PysimmError('LAMMPS data improperly formatted for harmonic bond')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), r0=float(data[2])
)
elif style.startswith('class2'):
if len(data) != 5:
raise PysimmError('LAMMPS data improperly formatted for class2 bond')
return cls(
tag=int(data[0]), name=name,
r0=float(data[1]), k2=float(data[2]),
k3=float(data[3]), k4=float(data[4])
)
else:
raise PysimmError('LAMMPS bond style {} not supported yet'.format(style))
def write_lammps(self, style='harmonic'):
"""pysimm.system.BondType.write_lammps
Formats a string to define bond type coefficients for a LAMMPS
data file given the provided style.
Args:
style: string for pair style of BondType (harmonic, class2)
Returns:
LAMMPS formatted string with bond coefficients
"""
if style.startswith('harm'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.r0, self.name
)
elif style.startswith('class2'):
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.r0, self.k2, self.k3, self.k4, self.name
)
else:
raise PysimmError('cannot understand pair style {}'.format(style))
def form(self, style='harmonic', d_range=None):
"""pysimm.system.BondType.form
Returns data to plot functional form for the potential energy with
the given style.
Args:
style: string for pair style of BondType (harmonic, class2)
Returns:
x, y for plotting functional form (energy vs distance)
"""
if not d_range:
d_range = np.linspace(self.r0-0.5, self.r0+0.5, 100)
if style == 'harmonic':
e = np.array([calc.harmonic_bond(self, d) for d in d_range])
return d_range, e
elif style == 'class2':
e = np.array([calc.class2_bond(self, d) for d in d_range])
return d_range, e
class Angle(Item):
"""pysimm.system.Angle
Angle between particles a, b, and c
a--b--c
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
a: :class:`~pysimm.system.Particle` object involved in angle
b: :class:`~pysimm.system.Particle` object involved in angle (middle particle)
c: :class:`~pysimm.system.Particle` object involved in angle
type: AngleType object reference
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
def angle(self, radians=False):
"""pysimm.system.Angle.angle
Calculate angle.
Args:
radians: True to return value in radians (default: False)
Returns:
Angle between Particle a, b, and c
"""
self.theta = calc.angle(self.a, self.b, self.c, radians)
return self.theta
class AngleType(Item):
"""pysimm.system.AngleType
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
k: harmonic angle bend force constant (kcal/mol/radian^2)
theta0: angle equilibrium value (degrees)
name: force field angle type name
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
if self.name:
self.rname = ','.join(reversed(self.name.split(',')))
@classmethod
def guess_style(cls, nparam):
if nparam == 2:
return 'harmonic'
elif nparam == 4:
return 'class2'
else:
raise PysimmError('Cannot guess angle style')
@classmethod
def parse_lammps(cls, line, style):
tmp = line.split('#')
data = tmp.pop(0).strip().split()
        name = ','.join(re.split(r',|\s+', tmp[0].strip())) if tmp else None
if style.startswith('harm'):
if len(data) != 3:
raise PysimmError('LAMMPS data improperly formatted for harmonic angle')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), theta0=float(data[2])
)
elif style.startswith('class2'):
if len(data) != 5:
raise PysimmError('LAMMPS data improperly formatted for class2 angle')
return cls(
tag=int(data[0]), name=name,
theta0=float(data[1]), k2=float(data[2]),
k3=float(data[3]), k4=float(data[4])
)
elif style.startswith('charmm'):
if len(data) != 5:
raise PysimmError('LAMMPS data improperly formatted for harmonic angle')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), theta0=float(data[2]),
k_ub=float(data[3]), r_ub=float(data[4])
)
else:
raise PysimmError('LAMMPS angle style {} not supported yet'.format(style))
def write_lammps(self, style='harmonic', cross_term=None):
"""pysimm.system.AngleType.write_lammps
Formats a string to define angle type coefficients for a LAMMPS
data file given the provided style.
Args:
style: string for pair style of AngleType (harmonic, class2, charmm)
cross_term: type of class2 cross term to write (default=None)
- BondBond
- BondAngle
Returns:
LAMMPS formatted string with angle coefficients
"""
if style.startswith('harm'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.theta0, self.name
)
elif style.startswith('class2'):
if not cross_term:
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.theta0, self.k2, self.k3, self.k4, self.name
)
elif cross_term == 'BondBond':
return '{:4}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.m, self.r1, self.r2, self.name
)
elif cross_term == 'BondAngle':
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.n1, self.n2, self.r1, self.r2, self.name
)
elif style.startswith('charmm'):
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.theta0, self.k_ub, self.r_ub, self.name
)
else:
raise PysimmError('cannot understand pair style {}'.format(style))
def form(self, style='harmonic', d_range=None):
"""pysimm.system.AngleType.form
Returns data to plot functional form for the potential energy with
the given style.
Args:
style: string for pair style of AngleType (harmonic, class2, charmm)
Returns:
x, y for plotting functional form (energy vs angle)
"""
if not d_range:
d_range = np.linspace(self.theta0-1, self.theta0+1, 100)
if style == 'harmonic':
e = np.array([calc.harmonic_angle(self, d) for d in d_range])
return d_range, e
elif style == 'charmm':
e = np.array([calc.harmonic_angle(self, d) for d in d_range])
return d_range, e
elif style == 'class2':
e = np.array([calc.class2_angle(self, d) for d in d_range])
return d_range, e
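# Illustrative sketch of a harmonic angle type (force constant and equilibrium
# angle are made-up values):
#   at = AngleType(tag=1, name='c3,c3,c3', k=40.0, theta0=109.5)
#   at.write_lammps('harmonic')   # -> "   1\t40.0\t109.5\t# c3,c3,c3\n"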
class Dihedral(Item):
"""pysimm.system.Dihedral
Dihedral between particles a, b, c, and d
a--b--c--d
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
a: :class:`~pysimm.system.Particle` object involved in dihedral
b: :class:`~pysimm.system.Particle` object involved in dihedral (middle particle)
c: :class:`~pysimm.system.Particle` object involved in dihedral (middle particle)
d: :class:`~pysimm.system.Particle` object involved in dihedral
type: :class:`~pysimm.system.DihedralType` object reference
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
class DihedralType(Item):
"""pysimm.system.DihedralType
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
k: dihedral energy barrier (kcal/mol)
d: minimum (+1 or -1)
n: multiplicity (integer >= 0)
name: force field dihedral type name
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
if self.name:
self.rname = ','.join(reversed(self.name.split(',')))
@classmethod
def guess_style(cls, nparam):
if nparam == 3:
return 'harmonic'
elif nparam % 3 == 1:
return 'fourier'
elif nparam == 6:
return 'class2'
else:
raise PysimmError('Cannot guess dihedral style')
@classmethod
def parse_lammps(cls, line, style):
tmp = line.split('#')
data = tmp.pop(0).strip().split()
        name = ','.join(re.split(r',|\s+', tmp[0].strip())) if tmp else None
if style.startswith('harm'):
if len(data) != 4:
raise PysimmError('LAMMPS data improperly formatted for harmonic dihedral')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), d=int(data[2]), n=int(data[3])
)
elif style.startswith('fourier'):
if len(data) % 3 != 2:
raise PysimmError('LAMMPS data improperly formatted for fourier dihedral')
tag = int(data.pop(0))
m = int(data.pop(0))
k = []
n = []
d = []
for i in range(m):
k.append(data.pop(0))
n.append(data.pop(0))
d.append(data.pop(0))
return cls(
tag=tag, name=name,
m=m, k=list(map(float, k)), n=list(map(int, n)), d=list(map(float, d))
)
elif style.startswith('class2'):
if len(data) != 7:
raise PysimmError('LAMMPS data improperly formatted for class2 dihedral')
return cls(
tag=int(data[0]), name=name,
k1=float(data[1]), phi1=float(data[2]),
k2=float(data[3]), phi2=float(data[4]),
k3=float(data[5]), phi3=float(data[6]),
)
elif style.startswith('charmm'):
if len(data) != 5:
raise PysimmError('LAMMPS data improperly formatted for charmm dihedral')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), n=float(data[2]),
d=float(data[3]), w=float(data[4])
)
elif style.startswith('opls'):
if len(data) != 5:
raise PysimmError('LAMMPS data improperly formatted for opls dihedral')
return cls(
tag=int(data[0]), name=name,
k1=float(data[1]), k2=float(data[2]),
k3=float(data[3]), k4=float(data[4])
)
else:
raise PysimmError('LAMMPS dihedral style {} not supported yet'.format(style))
def write_lammps(self, style='harmonic', cross_term=None):
"""pysimm.system.DihedralType.write_lammps
Formats a string to define dihedral type coefficients for a LAMMPS
data file given the provided style.
Args:
            style: string for dihedral style of DihedralType (harmonic, class2, fourier)
cross_term: type of class2 cross term to write (default=None)
- MiddleBond
- EndBond
- Angle
- AngleAngle
- BondBond13
Returns:
LAMMPS formatted string with dihedral coefficients
"""
if style.startswith('harm'):
return '{:4}\t{:f}\t{:d}\t{:d}\t# {}\n'.format(
self.tag, self.k, int(self.d), int(self.n), self.name
)
elif style.startswith('charmm'):
return '{:4}\t{:f}\t{:d}\t{:d}\t{:f}\t# {}\n'.format(
self.tag, self.k, int(self.n), int(self.d), self.w, self.name
)
elif style.startswith('opls'):
return '{:4}\t{:f}\t{:f}\t{:f}\t{:f}\t# {}\n'.format(
self.tag, self.k1, self.k2, self.k3, self.k4, self.name
)
elif style.startswith('fourier'):
st = '{:4}\t{:d}'.format(self.tag, self.m)
for k, n, d in zip(self.k, self.n, self.d):
st += '\t{}\t{:d}\t{}'.format(k, int(n), d)
st += '\t# {}\n'.format(self.name)
return st
elif style.startswith('class2'):
if not cross_term:
return '{:4}\t{}\t{}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.k1, self.phi1,
self.k2, self.phi2,
self.k3, self.phi3,
self.name
)
elif cross_term == 'MiddleBond':
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.a1, self.a2, self.a3, self.r2,
self.name
)
elif cross_term == 'EndBond':
return '{:4}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.b1, self.b2, self.b3,
self.c1, self.c2, self.c3,
self.r1, self.r3,
self.name
)
elif cross_term == 'Angle':
return '{:4}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.d1, self.d2, self.d3,
self.e1, self.e2, self.e3,
self.theta1, self.theta2,
self.name
)
elif cross_term == 'AngleAngle':
return '{:4}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.m,
self.theta1, self.theta2,
self.name
)
elif cross_term == 'BondBond13':
if self.n is None:
self.n = 0.0
return '{:4}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.n,
self.r1, self.r3,
self.name
)
else:
            raise PysimmError('cannot understand dihedral style {}'.format(style))
def form(self, style='harmonic', d_range=None):
"""pysimm.system.DihedralType.form
Returns data to plot functional form for the potential energy with
the given style.
Args:
            style: string for dihedral style of DihedralType (harmonic, class2, fourier)
Returns:
x, y for plotting functional form (energy vs angle)
"""
if not d_range:
d_range = np.linspace(-180, 180, 100)
if style == 'harmonic':
e = np.array([calc.harmonic_dihedral(self, d) for d in d_range])
return d_range, e
elif style == 'fourier':
e = np.array([calc.fourier_dihedral(self, d) for d in d_range])
return d_range, e
elif style == 'class2':
e = np.array([calc.class2_dihedral(self, d) for d in d_range])
return d_range, e
elif style == 'opls':
e = np.array([calc.opls_dihedral(self, d) for d in d_range])
return d_range, e
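# Illustrative sketch: a fourier DihedralType stores parallel lists k, n, d of length m,
# which write_lammps flattens back into a single coefficient line. Values below are
# placeholders, not real force field data.
#
#     dt = DihedralType.parse_lammps('2  2  0.5  3  0.0  0.2  2  180.0  # c3,c3,c3,c3', 'fourier')
#     coeff_line = dt.write_lammps(style='fourier')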
class Improper(Item):
"""pysimm.system.Improper
Improper dihedral around particle a, bonded to b, c, and d
| b
| |
| a--d
| |
| c
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
a: :class:`~pysimm.system.Particle` object involved in improper (middle particle)
b: :class:`~pysimm.system.Particle` object involved in improper
c: :class:`~pysimm.system.Particle` object involved in improper
d: :class:`~pysimm.system.Particle` object involved in improper
type: :class:`~pysimm.system.ImproperType` object reference
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
class ImproperType(Item):
"""pysimm.system.ImproperType
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
k: improper energy barrier (kcal/mol)
x0: equilibrium value (degrees)
name: force field improper type name
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
if self.name:
self.rname = ','.join(reversed(self.name.split(',')))
@classmethod
def guess_style(cls, nparam):
if nparam == 2:
return 'harmonic'
if nparam == 3:
return 'cvff'
else:
raise PysimmError('Cannot guess improper style')
@classmethod
def parse_lammps(cls, line, style):
tmp = line.split('#')
data = tmp.pop(0).strip().split()
        name = ','.join(re.split(r',|\s+', tmp[0].strip())) if tmp else None
if style.startswith('harm') or style.startswith('class2') or style.startswith('umbrella'):
if len(data) != 3:
raise PysimmError('LAMMPS data improperly formatted for harmonic improper')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), x0=float(data[2])
)
elif style.startswith('cvff'):
if len(data) != 4:
                raise PysimmError('LAMMPS data improperly formatted for cvff improper')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), d=int(data[2]), n=int(data[3])
)
else:
raise PysimmError('LAMMPS improper style {} not supported yet'.format(style))
def write_lammps(self, style='harmonic', cross_term=None):
"""pysimm.system.ImproperType.write_lammps
Formats a string to define improper type coefficients for a LAMMPS
data file given the provided style.
Args:
            style: string for improper style of ImproperType (harmonic, class2, cvff)
cross_term: type of class2 cross term to write (default=None)
- AngleAngle
Returns:
            LAMMPS formatted string with improper coefficients
"""
if style.startswith('harmonic'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.x0, self.name
)
elif style.startswith('umbrella'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.x0, self.name
)
elif style.startswith('cvff'):
return '{:4}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.d, self.n, self.name
)
elif style.startswith('class2'):
if self.k is None:
self.k = 0.0
if self.x0 is None:
self.x0 = 0.0
if not cross_term:
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.x0, self.name
)
elif cross_term == 'AngleAngle':
return '{:4}\t{}\t{}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.m1, self.m2, self.m3,
self.theta1, self.theta2, self.theta3,
self.name
)
else:
            raise PysimmError('cannot understand improper style {}'.format(style))
def form(self, style='harmonic', d_range=None):
"""pysimm.system.ImproperType.form
Returns data to plot functional form for the potential energy with
the given style.
Args:
            style: string for improper style of ImproperType (harmonic, cvff)
Returns:
x, y for plotting functional form (energy vs angle)
"""
if not d_range:
d_range = np.linspace(-2, 2, 100)
if style == 'harmonic':
e = np.array([calc.harmonic_improper(self, d) for d in d_range])
return d_range, e
elif style == 'cvff':
e = np.array([calc.cvff_improper(self, d) for d in d_range])
return d_range, e
elif style == 'umbrella':
e = np.array([calc.umbrella_improper(self, d) for d in d_range])
return d_range, e
class Dimension(Item):
"""pysimm.system.Dimension
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
xlo: minimum value in x dimension
xhi: maximum value in x dimension
ylo: minimum value in y dimension
yhi: maximum value in y dimension
zlo: minimum value in z dimension
zhi: maximum value in z dimension
dx: distance in x dimension
dy: distance in y dimension
dz: distance in z dimension
"""
def __init__(self, **kwargs):
center = kwargs.get('center')
Item.__init__(self, **kwargs)
if center:
self.translate(*center)
del self.center
def check(self):
if self.dx is not None and self.dy is not None and self.dz is not None:
return True
else:
return False
def size(self):
return (self.dx, self.dy, self.dz)
def translate(self, x, y, z):
"""pysimm.system.Dimension.translate
Shifts box bounds by x, y, z.
Args:
x: distance to shift box bounds in x direction
y: distance to shift box bounds in y direction
z: distance to shift box bounds in z direction
Returns:
None
"""
self.xlo += x
self.xhi += x
self.ylo += y
self.yhi += y
self.zlo += z
self.zhi += z
@property
def dx(self):
if self.xhi is None or self.xlo is None:
return None
else:
return self.xhi-self.xlo
@dx.setter
def dx(self, dx):
if dx is None:
return
center = 0
if self.xlo is not None and self.xhi is not None:
center = float(self.xhi + self.xlo)/2
self.xlo = center - float(dx)/2
self.xhi = center + float(dx)/2
@property
def dy(self):
if self.yhi is None or self.ylo is None:
return None
else:
return self.yhi-self.ylo
@dy.setter
def dy(self, dy):
if dy is None:
return
center = 0
if self.ylo is not None and self.yhi is not None:
center = float(self.yhi + self.ylo)/2
self.ylo = center - float(dy)/2
self.yhi = center + float(dy)/2
@property
def dz(self):
if self.zhi is None or self.zlo is None:
return None
else:
return self.zhi-self.zlo
@dz.setter
def dz(self, dz):
if dz is None:
return
center = 0
if self.zlo is not None and self.zhi is not None:
center = float(self.zhi + self.zlo)/2
self.zlo = center - float(dz)/2
self.zhi = center + float(dz)/2
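# Illustrative sketch: dx/dy/dz are derived from the box bounds, and their setters
# re-center the corresponding bounds around the current midpoint.
#
#     box = Dimension(xlo=-15.0, xhi=15.0, ylo=-15.0, yhi=15.0, zlo=-15.0, zhi=15.0)
#     box.dx          # 30.0 (xhi - xlo)
#     box.dz = 40.0   # zlo/zhi become -20.0/20.0, keeping the midpoint at 0.0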
class System(object):
"""pysimm.system.System
Object representation of molecular system.
Contains information required for molecular simulation.
Attributes:
dim: Dimension object reference
particles: :class:`~pysimm.utils.ItemContainer` for Particle organization
particle_types: :class:`~pysimm.utils.ItemContainer` for ParticleType organization
bonds: :class:`~pysimm.utils.ItemContainer` for Bond organization
bond_types: :class:`~pysimm.utils.ItemContainer` for BondType organization
angles: :class:`~pysimm.utils.ItemContainer` for Angle organization
angle_types: :class:`~pysimm.utils.ItemContainer` for AngleType organization
dihedrals: :class:`~pysimm.utils.ItemContainer` for Dihedral organization
dihedral_types: :class:`~pysimm.utils.ItemContainer` for DihedralType organization
impropers: :class:`~pysimm.utils.ItemContainer` for Improper organization
improper_types: :class:`~pysimm.utils.ItemContainer` for ImproperType organization
molecules: :class:`~pysimm.utils.ItemContainer` for Molecule organization
"""
def __init__(self, **kwargs):
self.objectified = False
self.name = kwargs.get('name', 'pySIMM System Object')
self.ff_class = kwargs.get('ff_class')
self.forcefield = kwargs.get('forcefield')
self.dim = Dimension(xlo=kwargs.get('xlo'), xhi=kwargs.get('xhi'),
ylo=kwargs.get('ylo'), yhi=kwargs.get('yhi'),
zlo=kwargs.get('zlo'), zhi=kwargs.get('zhi'),
dx=kwargs.get('dx'), dy=kwargs.get('dy'),
dz=kwargs.get('dz'), center=kwargs.get('center'))
self.dim_check = self.dim.check()
self.mass = kwargs.get('mass', 0.0)
self.particle_types = kwargs.get('particle_types', ItemContainer())
self.bond_types = kwargs.get('bond_types', ItemContainer())
self.angle_types = kwargs.get('angle_types', ItemContainer())
self.dihedral_types = kwargs.get('dihedral_types', ItemContainer())
self.improper_types = kwargs.get('improper_types', ItemContainer())
self.molecule_types = kwargs.get('molecule_types', ItemContainer())
self.particles = kwargs.get('particles', ItemContainer())
self.bonds = kwargs.get('bonds', ItemContainer())
self.angles = kwargs.get('angles', ItemContainer())
self.dihedrals = kwargs.get('dihedrals', ItemContainer())
self.impropers = kwargs.get('impropers', ItemContainer())
self.molecules = kwargs.get('molecules', ItemContainer())
self.write_coeffs = kwargs.get('write_coeffs', True)
self.set_mass()
self.set_volume()
self.set_density()
self.set_cog()
def __getattr__(self, name):
return None
def copy(self, rotate_x=None, rotate_y=None, rotate_z=None,
dx=0, dy=0, dz=0):
"""pysimm.system.System.copy
Create duplicate :class:`~pysimm.system.System` object. Default behavior does not modify particle positions.
Args:
rotate_x: rotate duplicate system around x axis by this value (radians)
rotate_y: rotate duplicate system around y axis by this value (radians)
rotate_z: rotate duplicate system around z axis by this value (radians)
dx: translate duplicate system in x dimension by this value (Angstrom)
dy: translate duplicate system in y dimension by this value (Angstrom)
dz: translate duplicate system in z dimension by this value (Angstrom)
"""
new = System()
new.ff_class = self.ff_class
new.forcefield = self.forcefield
new.pair_style = self.pair_style
new.bond_style = self.bond_style
new.angle_style = self.angle_style
new.dihedral_style = self.dihedral_style
new.improper_style = self.improper_style
new.dim = self.dim.copy()
for _ in self.molecules:
new.molecules.add(Molecule(tag=_.tag))
for pt in self.particle_types:
new.particle_types.add(pt.copy())
for bt in self.bond_types:
new.bond_types.add(bt.copy())
for at in self.angle_types:
new.angle_types.add(at.copy())
for dt in self.dihedral_types:
new.dihedral_types.add(dt.copy())
for it in self.improper_types:
new.improper_types.add(it.copy())
for p in self.particles:
new_p = p.copy()
if p.type:
new_p.type = new.particle_types[p.type.tag]
new_p.molecule = new.molecules[p.molecule.tag]
if rotate_x or rotate_y or rotate_z:
new_p.x, new_p.y, new_p.z = rotate_vector(new_p.x, new_p.y, new_p.z,
rotate_x, rotate_y, rotate_z)
new_p.x += dx
new_p.y += dy
new_p.z += dz
new.particles.add(new_p)
new_p.molecule.particles.add(new_p)
new_p.bonds = ItemContainer()
new_p.angles = ItemContainer()
new_p.dihedrals = ItemContainer()
new_p.impropers = ItemContainer()
for b in self.bonds:
new_b = b.copy()
new_b.a = new.particles[b.a.tag]
new_b.b = new.particles[b.b.tag]
if b.type:
new_b.type = new.bond_types[b.type.tag]
new.bonds.add(new_b)
new_b.a.molecule.bonds.add(new_b)
new_b.a.bonds.add(new_b)
new_b.b.bonds.add(new_b)
for a in self.angles:
new_a = Angle(a=new.particles[a.a.tag],
b=new.particles[a.b.tag],
c=new.particles[a.c.tag])
if a.type:
new_a.type=new.angle_types[a.type.tag]
new.angles.add(new_a)
new_a.a.molecule.angles.add(new_a)
for d in self.dihedrals:
new_d = Dihedral(a=new.particles[d.a.tag],
b=new.particles[d.b.tag],
c=new.particles[d.c.tag],
d=new.particles[d.d.tag])
if d.type:
new_d.type=new.dihedral_types[d.type.tag]
new.dihedrals.add(new_d)
new_d.a.molecule.dihedrals.add(new_d)
for i in self.impropers:
new_i = Improper(a=new.particles[i.a.tag],
b=new.particles[i.b.tag],
c=new.particles[i.c.tag],
d=new.particles[i.d.tag])
if i.type:
new_i.type = new.improper_types[i.type.tag]
new.impropers.add(new_i)
new_i.a.molecule.impropers.add(new_i)
for k, v in vars(self).items():
if not isinstance(v, ItemContainer) and not isinstance(v, Item):
setattr(new, k, v)
return new
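    # Illustrative sketch: duplicate a system, rotating the copy's coordinates and
    # shifting it along x; 's' is a placeholder for an existing System object.
    #
    #     replica = s.copy(rotate_z=1.5708, dx=10.0)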
def add(self, other, **kwargs):
"""pysimm.system.System.add
Add other :class:`~pysimm.system.System` to this. Optionally remove duplicate types (default behavior).
Args:
other: :class:`~pysimm.system.System` object to add
unique_types (optional): Remove duplicate types and reassign references to existing types (True)
change_dim (optional): Update :class:`~pysimm.system.Dimension` object so that :class:`~pysimm.system.Particle` objects do not exist
outside of :class:`~pysimm.system.Dimension` extremes (True)
update_properties (optional): Update system-wide mass, volume, density, center of gravity, and velocity
properties (True)
"""
unique_types = kwargs.get('unique_types', True)
change_dim = kwargs.get('change_dim', True)
update_properties = kwargs.get('update_properties', True)
for pt in other.particle_types:
if unique_types:
if pt.name not in [x.name for x in self.particle_types]:
del pt.tag
self.particle_types.add(pt)
else:
del pt.tag
self.particle_types.add(pt)
for bt in other.bond_types:
if unique_types:
if bt.name not in [x.name for x in self.bond_types]:
del bt.tag
self.bond_types.add(bt)
else:
del bt.tag
self.bond_types.add(bt)
for at in other.angle_types:
if unique_types:
if at.name not in [x.name for x in self.angle_types]:
del at.tag
self.angle_types.add(at)
else:
del at.tag
self.angle_types.add(at)
for dt in other.dihedral_types:
if unique_types:
if dt.name not in [x.name for x in self.dihedral_types]:
del dt.tag
self.dihedral_types.add(dt)
else:
del dt.tag
self.dihedral_types.add(dt)
for it in other.improper_types:
if unique_types:
if it.name not in [x.name for x in self.improper_types]:
del it.tag
self.improper_types.add(it)
else:
del it.tag
self.improper_types.add(it)
for p in other.particles:
del p.tag
if change_dim:
self.dim.xhi = max(p.x, self.dim.xhi)
self.dim.xlo = min(p.x, self.dim.xlo)
self.dim.yhi = max(p.y, self.dim.yhi)
self.dim.ylo = min(p.y, self.dim.ylo)
self.dim.zhi = max(p.z, self.dim.zhi)
self.dim.zlo = min(p.z, self.dim.zlo)
if unique_types and p.type not in self.particle_types:
pt = self.particle_types.get(p.type.name)
if not pt or len(pt) > 1:
error_print('ParticleType error')
else:
p.type = pt[0]
self.particles.add(p)
for b in other.bonds:
del b.tag
if unique_types and b.type not in self.bond_types:
bt = self.bond_types.get(b.type.name)
if not bt or len(bt) > 1:
error_print('BondType error')
else:
b.type = bt[0]
self.bonds.add(b)
for a in other.angles:
del a.tag
if unique_types and a.type not in self.angle_types:
at = self.angle_types.get(a.type.name)
if not at or len(at) > 1:
error_print('AngleType error')
else:
a.type = at[0]
self.angles.add(a)
for d in other.dihedrals:
del d.tag
if unique_types and d.type not in self.dihedral_types:
dt = self.dihedral_types.get(d.type.name)
if not dt:
error_print('DihedralType error')
elif len(dt) > 1:
index = 0
x = 5
for i in range(len(dt)):
if dt[i].name.count('X') < x:
index = i
x = dt[i].name.count('X')
d.type = dt[index]
else:
d.type = dt[0]
self.dihedrals.add(d)
for i in other.impropers:
del i.tag
if unique_types and i.type not in self.improper_types:
it = self.improper_types.get(i.type.name)
if not it:
error_print('ImproperType error')
else:
i.type = it[0]
self.impropers.add(i)
for m in other.molecules:
del m.tag
self.molecules.add(m)
p_list = m.particles.get('all')
m.particles.remove('all')
for p in p_list:
m.particles.add(p)
if update_properties:
self.set_mass()
self.set_volume()
self.set_density()
self.set_cog()
self.set_velocity()
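    # Illustrative sketch: pack several copies of a small molecule system 'mol' into
    # a host system 's' (both placeholders); duplicate force field types are merged
    # by name with the default unique_types=True.
    #
    #     for i in range(10):
    #         s.add(mol.copy(dx=3.0 * i), change_dim=True)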
def distance(self, p1, p2):
"""pysimm.system.System.distance
Calculate distance between two particles considering pbc.
Args:
p1: :class:`~pysimm.system.Particle` object
p2: :class:`~pysimm.system.Particle` object
Returns:
distance between particles considering pbc
"""
return calc.pbc_distance(self, p1, p2)
def wrap(self):
"""pysimm.system.System.wrap
Wrap :class:`~pysimm.system.Particle` images into box defined by :class:`~pysimm.system.Dimension` object.
Ensure particles are contained within simulation box.
Args:
None
Returns:
None
"""
self.dim.check()
for p in self.particles:
while p.x > self.dim.xhi:
p.x -= self.dim.dx
while p.x < self.dim.xlo:
p.x += self.dim.dx
while p.y > self.dim.yhi:
p.y -= self.dim.dy
while p.y < self.dim.ylo:
p.y += self.dim.dy
while p.z > self.dim.zhi:
p.z -= self.dim.dz
while p.z < self.dim.zlo:
p.z += self.dim.dz
def unwrap(self):
"""pysimm.system.System.unwrap()
Unwraps :class:`~pysimm.system.Particle` images such that no bonds cross box edges.
Args:
None
Returns:
None
"""
self.dim.check()
self.add_particle_bonding()
next_to_unwrap = []
for p in self.particles:
p.unwrapped = False
for m in self.molecules:
for p0 in m.particles:
p0.unwrapped = True
next_to_unwrap.append(p0)
for p in next_to_unwrap:
for pb in p.bonded_to:
if pb.unwrapped:
continue
next_to_unwrap.append(pb)
pb.unwrapped = True
dx = p.x - pb.x
while abs(dx) > self.dim.dx / 2:
if dx > 0:
pb.x += self.dim.dx
else:
pb.x -= self.dim.dx
dx = p.x - pb.x
dy = p.y - pb.y
while abs(dy) > self.dim.dy / 2:
if dy > 0:
pb.y += self.dim.dy
else:
pb.y -= self.dim.dy
dy = p.y - pb.y
dz = p.z - pb.z
while abs(dz) > self.dim.dz / 2:
if dz > 0:
pb.z += self.dim.dz
else:
pb.z -= self.dim.dz
dz = p.z - pb.z
for b in self.bonds:
if b.distance() > 5:
print('unwrap probably failed')
return False
return True
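    # Illustrative sketch: unwrap molecules before measuring intramolecular geometry,
    # then restore periodic images with wrap(). 's' is a placeholder System object.
    #
    #     if s.unwrap():
    #         # ... analyse whole-molecule geometry here ...
    #         s.wrap()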
def particles_df(self, columns=['tag', 'x', 'y', 'z', 'q'], index='tag', extras=[]):
if pd is None:
raise PysimmError('pysimm.system.System.particles_df function requires pandas')
data = [{c: getattr(p, c) for c in columns} for p in self.particles]
if extras:
for d in data:
if 'type.name' in extras:
d['type.name'] = self.particles[d['tag']].type.name
if 'type.tag' in extras:
d['type.tag'] = self.particles[d['tag']].type.tag
df = pd.DataFrame(data=data)
if index in columns:
df = df.set_index(index)
return df
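    # Illustrative sketch (requires pandas): tabulate particle data and pull in the
    # force field type name through the 'extras' hook.
    #
    #     df = s.particles_df(extras=['type.name'])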
def unite_atoms(self):
for p in self.particles:
p.implicit_h = 0
if p.elem != 'C':
continue
for b in p.bonds:
pb = b.a if b.b is p else b.b
if pb.elem =='H':
p.implicit_h += 1
p.charge += pb.charge
self.particles.remove(pb.tag, update=False)
self.remove_spare_bonding()
def quality(self, tolerance=0.1):
"""pysimm.system.System.quality
        Attempts to assess the quality of :class:`~pysimm.system.System` based on bond lengths in the unwrapped system.
Args:
tolerance: fractional value of equilibrium bond length that is acceptable
        Returns:
            None (the number of bonds outside tolerance is printed)
"""
self.unwrap()
bad_bonds = 0
for b in self.bonds:
if b.distance() > b.type.r0*(1+tolerance) or b.distance() < b.type.r0*(1-tolerance):
bad_bonds += 1
verbose_print('%s of %s bonds found to be outside of tolerance' % (bad_bonds, self.bonds.count))
self.wrap()
def shift_to_origin(self):
"""pysimm.system.System.shift_to_origin
        Shifts particles and simulation box so that the box begins at the origin, i.e. xlo=ylo=zlo=0
Args:
None
Returns:
None
"""
for p in self.particles:
p.x -= self.dim.xlo
p.y -= self.dim.ylo
p.z -= self.dim.zlo
self.dim.xhi -= self.dim.xlo
self.dim.yhi -= self.dim.ylo
self.dim.zhi -= self.dim.zlo
self.dim.xlo -= self.dim.xlo
self.dim.ylo -= self.dim.ylo
self.dim.zlo -= self.dim.zlo
def set_charge(self):
"""pysimm.system.System.set_charge
        Computes the total charge of all :class:`~pysimm.system.Particle` objects in System.particles and stores it as System.charge
Args:
None
Returns:
None
"""
self.charge = 0
for p in self.particles:
self.charge += p.charge
def zero_charge(self):
"""pysimm.system.System.zero_charge
Enforces total :class:`~pysimm.system.System` charge to be 0.0 by subtracting excess charge from last particle
Args:
            None
Returns:
None
"""
charge = 0.
for p in self.particles:
charge += p.charge
if charge != 0:
p.charge -= charge
self.set_charge()
def check_items(self):
"""pysimm.system.System.check_items
Checks particles, bonds, angles, dihedrals, impropers, and molecules containers and raises exception if the length of items in the container does not equal the count property
Args:
            None
Returns:
None
"""
if len(self.particles) != self.particles.count:
raise PysimmError('particles missing')
if len(self.bonds) != self.bonds.count:
raise PysimmError('bonds missing')
if len(self.angles) != self.angles.count:
raise PysimmError('angles missing')
if len(self.dihedrals) != self.dihedrals.count:
raise PysimmError('dihedrals missing')
if len(self.impropers) != self.impropers.count:
raise PysimmError('impropers missing')
if len(self.molecules) != self.molecules.count:
raise PysimmError('molecules missing')
def update_ff_types_from_ac(self, ff, acname):
"""pysimm.system.System.update_ff_types_from_ac
Updates :class:`~pysimm.system.ParticleType` objects in system using type names given in antechamber (ac) file. Retrieves type from System if possible, then searches force field provided by ff.
Args:
ff: forcefield to search for Type objects
acname: ac filename containing type names
Returns:
None
"""
self.particle_types.remove('all')
with open(acname) as f:
for line in f:
if line.startswith('ATOM'):
tag = int(line.split()[1])
tname = line.split()[-1]
s_pt = self.particle_types.get(tname)
if not s_pt:
s_pt = ff.particle_types.get(tname)
if not s_pt:
error_print('cannot find type with name {}'.format(tname))
self.particle_types.add(s_pt[0].copy())
self.particles[tag].type = self.particle_types.get(tname)[0]
def update_particle_types_from_forcefield(self, f):
"""pysimm.system.System.update_types_from_forcefield
Updates :class:`~pysimm.system.ParticleType` data from :class:`~pysimm.forcefield.Forcefield` object f based on :class:`~pysimm.system.ParticleType`.name
Args:
f: :class:`~pysimm.forcefield.Forcefield` object reference
Returns:
None
"""
for pt in self.particle_types:
name_ = pt.name.split('@')[-1]
linker = False
if pt.name.find('@') >= 0:
linker = pt.name.split('@')[0]
pt_ = f.particle_types.get(name_)
if pt_:
new = pt_[0].copy()
new.tag = pt.tag
if linker:
new.name = '%s@%s' % (linker, new.name)
self.particle_types.remove(pt.tag)
self.particle_types.add(new)
def make_linker_types(self):
"""pysimm.system.System.make_linker_types
        Identifies linker particles and creates duplicate :class:`~pysimm.system.ParticleType` objects with new names.
Identification is performed by :class:`~pysimm.system.Particle`.linker attribute.
New :class:`~pysimm.system.ParticleType` name is prepended with [H or T]L@ to designate head or tail linker
Args:
None
Returns:
None
"""
for p in self.particles:
if p.linker == 'head':
head_linker = self.particle_types.get('HL@%s' % p.type.name)
if head_linker:
p.type = head_linker[0]
else:
p.type = p.type.copy()
p.type.name = 'HL@%s' % p.type.name
self.particle_types.add(p.type)
elif p.linker == 'tail':
tail_linker = self.particle_types.get('TL@%s' % p.type.name)
if tail_linker:
p.type = tail_linker[0]
else:
p.type = p.type.copy()
p.type.name = 'TL@%s' % p.type.name
self.particle_types.add(p.type)
elif p.linker:
linker = self.particle_types.get('L@%s' % p.type.name)
if linker:
p.type = linker[0]
else:
p.type = p.type.copy()
p.type.name = 'L@%s' % p.type.name
self.particle_types.add(p.type)
def remove_linker_types(self):
"""pysimm.system.System.remove_linker_types
Reassigns :class:`~pysimm.system.Particle`.type references to original :class:`~pysimm.system.ParticleType` objects without linker prepend
Args:
None
Returns:
None
"""
for p in self.particles:
if p.type.name.find('@') >= 0:
pt = self.particle_types.get(p.type.name.split('@')[-1])
if pt:
p.type = pt[0]
else:
print('cannot find regular type for linker %s'
% p.type.name)
def read_lammps_dump(self, fname):
"""pysimm.system.System.read_lammps_dump
Updates particle positions and box size from LAMMPS dump file.
Assumes following format for each atom line:
tag charge xcoord ycoord zcoord xvelocity yvelocity zvelocity
Args:
fname: LAMMPS dump file
Returns:
None
"""
nparticles = 0
with open(fname) as f:
line = f.readline()
while line:
if len(line.split()) > 1 and line.split()[1] == 'NUMBER':
nparticles = int(f.readline())
elif len(line.split()) > 1 and line.split()[1] == 'BOX':
self.dim.xlo, self.dim.xhi = map(float, f.readline().split())
self.dim.ylo, self.dim.yhi = map(float, f.readline().split())
self.dim.zlo, self.dim.zhi = map(float, f.readline().split())
self.set_volume()
self.set_density()
elif len(line.split()) > 1 and line.split()[1] == 'ATOMS':
for i in range(nparticles):
tag, q, x, y, z, vx, vy, vz = map(float, f.readline().split())
tag = int(tag)
if self.particles[tag]:
p = self.particles[tag]
p.charge = q
p.x = x
p.vx = vx
p.y = y
p.vy = vy
p.z = z
p.vz = vz
line = f.readline()
def read_lammpstrj(self, trj, frame=1):
"""pysimm.system.System.read_lammpstrj
Updates particle positions and box size from LAMMPS trajectory file at given frame.
Assumes one of following formats for each atom line:
tag xcoord ycoord zcoord
OR
tag type_id xcoord ycoord zcoord
OR
tag type_id xcoord ycoord zcoord ximage yimage zimage
Args:
trj: LAMMPS trajectory file
frame: sequential frame number (not LAMMPS timestep) default=1
Returns:
None
"""
t_frame = 0
nparticles = 0
updated = 0
with open(trj) as f:
line = f.readline()
while line:
if len(line.split()) > 1 and line.split()[1] == 'TIMESTEP':
t_frame += 1
elif len(line.split()) > 1 and line.split()[1] == 'NUMBER':
nparticles = int(f.readline())
elif (len(line.split()) > 1 and line.split()[1] == 'BOX' and
t_frame == frame):
self.dim.xlo, self.dim.xhi = map(float,
f.readline().split())
self.dim.ylo, self.dim.yhi = map(float,
f.readline().split())
self.dim.zlo, self.dim.zhi = map(float,
f.readline().split())
elif (len(line.split()) > 1 and line.split()[1] == 'ATOMS' and
t_frame == frame):
for i in range(nparticles):
line = f.readline().split()
if len(line) == 4:
id_, x, y, z = map(float, line)
elif len(line) == 5:
id_, type_, x, y, z = map(float, line)
elif len(line) == 8:
id_, type_, x, y, z, ix, iy, iz = map(float, line)
else:
error_print('cannot understand lammpstrj formatting; exiting')
return
id_ = int(id_)
if self.particles[id_]:
updated += 1
self.particles[id_].x = x
self.particles[id_].y = y
self.particles[id_].z = z
line = f.readline()
verbose_print('updated particle positions for %s of %s particles from trajectory' % (updated, nparticles))
def read_xyz(self, xyz, frame=1, quiet=False):
"""pysimm.system.System.read_xyz
Updates particle positions and box size from xyz file at given frame
Args:
xyz: xyz trajectory file
frame: sequential frame number default=1
quiet: True to print status default=False
Returns:
None
"""
if not quiet:
verbose_print('reading particle positions from %s' % xyz)
warning_print('particles are assumed to be in order in xyz file')
t_frame = 0
with open(xyz) as f:
line = f.readline()
while line:
t_frame += 1
assert int(line.split()[0]) == self.particles.count
line = f.readline()
for n in range(1, self.particles.count + 1):
p = self.particles[n]
if t_frame == 1:
if not p.type.elem and p.type.name:
if p.type.name[0].lower() != 'l':
p.type.elem = p.type.name[0].upper()
else:
p.type.elem = p.type.name[1].upper()
line = f.readline()
if t_frame == frame:
x, y, z = map(float, line.split()[1:])
p.x = x
p.y = y
p.z = z
if t_frame == frame:
print('read %s particle positions from %s'
% (self.particles.count, xyz))
line = f.readline()
def update_types(self, ptypes, btypes, atypes, dtypes, itypes):
"""pysimm.system.System.update_types
Updates type objects from a given list of types.
Args:
ptypes: list of :class:`~pysimm.system.ParticleType` objects from which to update
btypes: list of :class:`~pysimm.system.BondType` objects from which to update
atypes: list of :class:`~pysimm.system.AngleType` objects from which to update
dtypes: list of :class:`~pysimm.system.DihedralType` objects from which to update
itypes: list of :class:`~pysimm.system.ImproperType` objects from which to update
"""
if ptypes is not None:
for p in self.particles:
pt = self.particle_types.get(p.type.name, first=True)
if pt:
p.type = pt[0]
self.particle_types.remove('all')
for pt in ptypes:
self.particle_types.add(pt)
if btypes is not None:
for b in self.bonds:
bt = self.bond_types.get(b.type.name, first=True)
if bt:
b.type = bt[0]
self.bond_types.remove('all')
for bt in btypes:
self.bond_types.add(bt)
if atypes is not None:
for a in self.angles:
at = self.angle_types.get(a.type.name, first=True)
if at:
a.type = at[0]
self.angle_types.remove('all')
for at in atypes:
self.angle_types.add(at)
if dtypes is not None:
for d in self.dihedrals:
dt = self.dihedral_types.get(d.type.name, first=True)
if dt:
d.type = dt[0]
self.dihedral_types.remove('all')
for dt in dtypes:
self.dihedral_types.add(dt)
if itypes is not None:
for i in self.impropers:
it = self.improper_types.get(i.type.name, first=True)
if it:
i.type = it[0]
self.improper_types.remove('all')
for it in itypes:
self.improper_types.add(it)
def read_type_names(self, types_file):
"""pysimm.system.System.read_type_names
        Update :class:`~pysimm.system.ParticleType` (and bond, angle, dihedral, improper type) names from file.
Args:
types_file: type dictionary file name
Returns:
None
"""
ptypes = dict()
btypes = dict()
atypes = dict()
dtypes = dict()
itypes = dict()
if os.path.isfile(types_file):
f = open(types_file)
elif isinstance(types_file, str):
f = StringIO(types_file)
for line in f:
line = line.split()
if line and line[0].lower() == 'atom':
for i in range(self.particle_types.count):
line = next(f).split()
ptypes[int(line[0])] = line[1]
elif line and line[0].lower() == 'bond':
for i in range(self.bond_types.count):
line = next(f).split()
btypes[int(line[0])] = line[1]
elif line and line[0].lower() == 'angle':
for i in range(self.angle_types.count):
line = next(f).split()
atypes[int(line[0])] = line[1]
elif line and line[0].lower() == 'dihedral':
for i in range(self.dihedral_types.count):
line = next(f).split()
dtypes[int(line[0])] = line[1]
elif line and line[0].lower() == 'improper':
for i in range(self.improper_types.count):
line = next(f).split()
itypes[int(line[0])] = line[1]
for t in self.particle_types:
t.name = ptypes[t.tag]
if t.name[0] == 'L':
if t.name[1].upper() in ['H', 'C', 'N', 'O']:
t.elem = t.name[1].upper()
else:
if t.name[0].upper() in ['H', 'C', 'N', 'O']:
t.elem = t.name[0].upper()
for t in self.bond_types:
t.name = btypes[t.tag]
t.rname = ','.join(reversed(t.name.split(',')))
for t in self.angle_types:
t.name = atypes[t.tag]
t.rname = ','.join(reversed(t.name.split(',')))
for t in self.dihedral_types:
t.name = dtypes[t.tag]
t.rname = ','.join(reversed(t.name.split(',')))
for t in self.improper_types:
t.name = itypes[t.tag]
t.rname = ','.join(reversed(t.name.split(',')))
def remove_spare_bonding(self, update_tags=True):
"""pysimm.system.System.remove_spare_bonding
Removes bonds, angles, dihedrals and impropers that reference particles not in :class:`~pysimm.system.System`.particles
Args:
update_tags: True to update all tags after removal of bonding items default=True
"""
for b in self.bonds:
if b.a not in self.particles or b.b not in self.particles:
self.bonds.remove(b.tag, update=False)
for a in self.angles:
if (a.a not in self.particles or a.b not in self.particles or
a.c not in self.particles):
self.angles.remove(a.tag, update=False)
for d in self.dihedrals:
if (d.a not in self.particles or d.b not in self.particles or
d.c not in self.particles or d.d not in self.particles):
self.dihedrals.remove(d.tag, update=False)
for i in self.impropers:
if (i.a not in self.particles or i.b not in self.particles or
i.c not in self.particles or i.d not in self.particles):
self.impropers.remove(i.tag, update=False)
if update_tags:
self.update_tags()
def update_tags(self):
"""pysimm.system.System.update_tags
Update Item tags in :class:`~pysimm.utils.ItemContainer` objects to preserve continuous tags. Removes all objects and then reinserts them.
Args:
None
Returns:
None
"""
particles = self.particles.get('all')
self.particles.remove('all')
for p in particles:
del p.tag
self.particles.add(p)
ptypes = self.particle_types.get('all')
self.particle_types.remove('all')
for pt in ptypes:
del pt.tag
self.particle_types.add(pt)
bonds = self.bonds.get('all')
self.bonds.remove('all')
for b in bonds:
del b.tag
self.bonds.add(b)
btypes = self.bond_types.get('all')
self.bond_types.remove('all')
for bt in btypes:
del bt.tag
self.bond_types.add(bt)
angles = self.angles.get('all')
self.angles.remove('all')
for a in angles:
del a.tag
self.angles.add(a)
atypes = self.angle_types.get('all')
self.angle_types.remove('all')
for at in atypes:
del at.tag
self.angle_types.add(at)
dihedrals = self.dihedrals.get('all')
self.dihedrals.remove('all')
for d in dihedrals:
del d.tag
self.dihedrals.add(d)
dtypes = self.dihedral_types.get('all')
self.dihedral_types.remove('all')
for dt in dtypes:
del dt.tag
self.dihedral_types.add(dt)
impropers = self.impropers.get('all')
self.impropers.remove('all')
for i in impropers:
del i.tag
self.impropers.add(i)
itypes = self.improper_types.get('all')
self.improper_types.remove('all')
for it in itypes:
del it.tag
self.improper_types.add(it)
def set_references(self):
"""pysimm.system.System.set_references
Set object references when :class:`~pysimm.system.System` information read from text file.
For example, if bond type value 2 is read from file, set :class:`~pysimm.system.Bond`.type to bond_types[2]
Args:
None
Returns:
None
"""
for p in self.particles:
if isinstance(p.type, int) and self.particle_types[p.type]:
p.type = self.particle_types[p.type]
elif isinstance(p.type, int) and not self.particle_types[p.type]:
error_print('error: Cannot find type with tag %s in system '
'particles types' % p.type)
for b in self.bonds:
if isinstance(b.type, int) and self.bond_types[b.type]:
b.type = self.bond_types[b.type]
elif isinstance(b.type, int) and not self.bond_types[b.type]:
error_print('error: Cannot find type with tag %s in system '
'bond types' % b.type)
for a in self.angles:
if isinstance(a.type, int) and self.angle_types[a.type]:
a.type = self.angle_types[a.type]
            elif isinstance(a.type, int) and not self.angle_types[a.type]:
error_print('error: Cannot find type with tag %s in system '
'angle types' % a.type)
for d in self.dihedrals:
if isinstance(d.type, int) and self.dihedral_types[d.type]:
d.type = self.dihedral_types[d.type]
elif isinstance(d.type, int) and not self.dihedral_types[d.type]:
error_print('error: Cannot find type with tag %s in system '
'dihedral types' % d.type)
for i in self.impropers:
if isinstance(i.type, int) and self.improper_types[i.type]:
i.type = self.improper_types[i.type]
elif isinstance(i.type, int) and not self.improper_types[i.type]:
error_print('error: Cannot find type with tag %s in system '
'improper types' % i.type)
def objectify(self):
"""pysimm.system.System.objectify
Set references for :class:`~pysimm.system.Bond`, :class:`~pysimm.system.Angle`, :class:`~pysimm.system.Dihedral`, :class:`~pysimm.system.Improper` objects.
        For example, if it is read from a file that bond #1 is between particles 1 and 2, set :class:`~pysimm.system.Bond`.a to particles[1], etc.
Args:
None
Returns:
None
"""
if self.objectified:
return 'already objectified'
self.set_references()
for p in self.particles:
if not isinstance(p.molecule, Molecule):
if not self.molecules[p.molecule]:
m = Molecule()
m.tag = p.molecule
self.molecules.add(m)
p.molecule = self.molecules[p.molecule]
self.molecules[p.molecule.tag].particles.add(p)
p.bonds = ItemContainer()
p.angles = ItemContainer()
p.dihedrals = ItemContainer()
p.impropers = ItemContainer()
for b in self.bonds:
if type(b.a) == int:
b.a = self.particles[b.a]
b.b = self.particles[b.b]
b.a.bonds.add(b)
b.b.bonds.add(b)
if b.a.molecule:
b.a.molecule.bonds.add(b)
for a in self.angles:
if type(a.a) == int:
a.a = self.particles[a.a]
a.b = self.particles[a.b]
a.c = self.particles[a.c]
if a.a.molecule:
a.a.molecule.angles.add(a)
for d in self.dihedrals:
if type(d.a) == int:
d.a = self.particles[d.a]
d.b = self.particles[d.b]
d.c = self.particles[d.c]
d.d = self.particles[d.d]
if d.a.molecule:
d.a.molecule.dihedrals.add(d)
for i in self.impropers:
if type(i.a) == int:
i.a = self.particles[i.a]
i.b = self.particles[i.b]
i.c = self.particles[i.c]
i.d = self.particles[i.d]
if i.a.molecule:
i.a.molecule.impropers.add(i)
self.objectified = True
def add_particle_bonding(self):
"""pysimm.system.System.add_particle_bonding
        Update :class:`~pysimm.system.Particle` objects such that :class:`~pysimm.system.Particle`.bonded_to contains other :class:`~pysimm.system.Particle` objects involved in bonding
Args:
None
Returns:
None
"""
for p in self.particles:
p.bonded_to = ItemContainer()
p.bonds = ItemContainer()
for b in self.bonds:
b.a.bonded_to.add(b.b)
b.a.bonds.add(b)
b.b.bonded_to.add(b.a)
b.b.bonds.add(b)
def set_excluded_particles(self, bonds=True, angles=True, dihedrals=True):
"""pysimm.system.System.set_excluded_particles
        Updates :class:`~pysimm.system.Particle` objects such that :class:`~pysimm.system.Particle`.excluded_particles contains other :class:`~pysimm.system.Particle` objects involved in
1-2, 1-3, and/or 1-4 interactions
Args:
bonds: exclude particles involved in 1-2 interactions
angles: exclude particles involved in 1-3 interactions
dihedrals: exclude particles involved in 1-4 interactions
"""
for p in self.particles:
p.excluded_particles = ItemContainer()
if bonds:
for b in self.bonds:
if b.a.tag < b.b.tag:
b.a.excluded_particles.add(b.b)
else:
b.b.excluded_particles.add(b.a)
if angles:
for a in self.angles:
if a.a.tag < a.b.tag:
a.a.excluded_particles.add(a.b)
if a.a.tag < a.c.tag:
a.a.excluded_particles.add(a.c)
if a.b.tag < a.a.tag:
a.b.excluded_particles.add(a.a)
if a.b.tag < a.c.tag:
a.b.excluded_particles.add(a.c)
if a.c.tag < a.a.tag:
a.c.excluded_particles.add(a.a)
if a.c.tag < a.b.tag:
a.c.excluded_particles.add(a.b)
if dihedrals:
for d in self.dihedrals:
if d.a.tag < d.b.tag:
d.a.excluded_particles.add(d.b)
if d.a.tag < d.c.tag:
d.a.excluded_particles.add(d.c)
if d.a.tag < d.d.tag:
d.a.excluded_particles.add(d.d)
if d.b.tag < d.a.tag:
d.b.excluded_particles.add(d.a)
if d.b.tag < d.c.tag:
d.b.excluded_particles.add(d.c)
if d.b.tag < d.d.tag:
d.b.excluded_particles.add(d.d)
if d.c.tag < d.a.tag:
d.c.excluded_particles.add(d.a)
if d.c.tag < d.b.tag:
d.c.excluded_particles.add(d.b)
if d.c.tag < d.d.tag:
d.c.excluded_particles.add(d.d)
if d.d.tag < d.a.tag:
d.d.excluded_particles.add(d.a)
if d.d.tag < d.b.tag:
d.d.excluded_particles.add(d.b)
if d.d.tag < d.c.tag:
d.d.excluded_particles.add(d.c)
def set_atomic_numbers(self):
"""pysimm.system.System.set_atomic_numbers
Updates :class:`~pysimm.system.ParticleType` objects with atomic number based on :class:`~pysimm.system.ParticleType`.elem
Args:
None
Returns:
None
"""
for pt in self.particle_types:
if pt.elem == 'H':
pt.atomic_number = 1
elif pt.elem == 'He':
pt.atomic_number = 2
elif pt.elem == 'Li':
pt.atomic_number = 3
elif pt.elem == 'Be':
pt.atomic_number = 4
elif pt.elem == 'B':
pt.atomic_number = 5
elif pt.elem == 'C':
pt.atomic_number = 6
elif pt.elem == 'N':
pt.atomic_number = 7
elif pt.elem == 'O':
pt.atomic_number = 8
elif pt.elem == 'F':
pt.atomic_number = 9
elif pt.elem == 'Ne':
pt.atomic_number = 10
elif pt.elem == 'Na':
pt.atomic_number = 11
elif pt.elem == 'Mg':
pt.atomic_number = 12
elif pt.elem == 'Al':
pt.atomic_number = 13
elif pt.elem == 'Si':
pt.atomic_number = 14
elif pt.elem == 'P':
pt.atomic_number = 15
elif pt.elem == 'S':
pt.atomic_number = 16
elif pt.elem == 'Cl':
pt.atomic_number = 17
elif pt.elem == 'Ar':
pt.atomic_number = 18
elif pt.elem == 'K':
pt.atomic_number = 19
elif pt.elem == 'Ca':
pt.atomic_number = 20
elif pt.elem == 'Br':
pt.atomic_number = 35
elif pt.elem == 'I':
pt.atomic_number = 53
def add_particle_bonded_to(self, p, p0, f=None, sep=1.5):
"""pysimm.system.System.add_particle_bonded_to
Add new :class:`~pysimm.system.Particle` to :class:`~pysimm.system.System` bonded to p0 and automatically update new forcefield types
Args:
p: new :class:`~pysimm.system.Particle` object to be added
p0: original :class:`~pysimm.system.Particle` object in :class:`~pysimm.system.System` to which p will be bonded
            f: :class:`~pysimm.forcefield.Forcefield` object from which new force field types will be retrieved
            sep: distance (Angstrom) at which p is placed from p0 when p has no coordinates (default=1.5)
Returns:
new Particle being added to system for convenient reference
"""
if p.x is None or p.y is None or p.z is None:
phi = random() * 2 * pi
theta = acos(random() * 2 - 1)
p.x = p0.x + sep * cos(theta) * sin(phi)
p.y = p0.y + sep * sin(theta) * sin(phi)
p.z = p0.z + sep * cos(phi)
if p.charge is None:
p.charge = 0
if p.molecule is None:
p.molecule = p0.molecule
self.add_particle(p)
self.add_bond(p0, p, f)
if not p0.bonded_to:
self.add_particle_bonding()
for p_ in p0.bonded_to:
if p_ is not p:
self.add_angle(p_, p0, p, f)
for p_b in p_.bonded_to:
if p_b is not p0:
self.add_dihedral(p_b, p_, p0, p, f)
return p
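    # Illustrative sketch: grow a new atom onto an existing particle p0. 'ct' and 'ff'
    # are placeholders for a ParticleType already in the system and a pysimm forcefield
    # object; Particle is assumed to accept keyword attributes like other Item subclasses.
    # With no coordinates given, the new atom is placed in a random direction sep away from p0.
    #
    #     new_p = Particle(type=ct, charge=0.0)
    #     s.add_particle_bonded_to(new_p, p0, f=ff)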
def add_particle(self, p):
"""pysimm.system.System.add_particle
Add new :class:`~pysimm.system.Particle` to :class:`~pysimm.system.System`.
Args:
p: new :class:`~pysimm.system.Particle` object to be added
Returns:
None
"""
self.particles.add(p)
def rotate(self, around=None, theta_x=0, theta_y=0, theta_z=0, rot_matrix=None):
"""pysimm.system.System.rotate
*** REQUIRES NUMPY ***
Rotates entire system around given :class:`~pysimm.system.Particle` by user defined angles
Args:
around: :class:`~pysimm.system.Particle` around which :class:`~pysimm.system.System` will be rotated default=None
            theta_x: angle (radians) by which system will be rotated about the x axis, or 'random'
            theta_y: angle (radians) by which system will be rotated about the y axis, or 'random'
            theta_z: angle (radians) by which system will be rotated about the z axis, or 'random'
rot_matrix: rotation matrix to use for rotation
Returns:
None
"""
if not np:
raise PysimmError('pysimm.system.System.rotate function requires numpy')
        theta_x = random() * 2 * pi if theta_x == 'random' else theta_x
        theta_y = random() * 2 * pi if theta_y == 'random' else theta_y
        theta_z = random() * 2 * pi if theta_z == 'random' else theta_z
if around is None:
around = []
self.set_cog()
around.append(self.cog[0])
around.append(self.cog[1])
around.append(self.cog[2])
elif isinstance(around, Particle):
around = [around.x, around.y, around.z]
if (isinstance(around, list) and len(around) == 3 and
len(set([isinstance(x, float) for x in around])) == 1 and isinstance(around[0], float)):
for p in self.particles:
p.x -= around[0]
p.y -= around[1]
p.z -= around[2]
if rot_matrix is not None:
p.x, p.y, p.z = [x[0] for x in (rot_matrix*np.matrix([[p.x], [p.y], [p.z]])).tolist()]
else:
p.x, p.y, p.z = rotate_vector(p.x, p.y, p.z, theta_x, theta_y, theta_z)
p.x += around[0]
p.y += around[1]
p.z += around[2]
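    # Illustrative sketch (requires numpy): apply a random rigid-body rotation of the
    # whole system about its center of geometry.
    #
    #     s.rotate(theta_x='random', theta_y='random', theta_z='random')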
def make_new_bonds(self, p1=None, p2=None, f=None, angles=True, dihedrals=True, impropers=True):
"""pysimm.system.System.make_new_bonds
Makes new bond between two particles and updates new force field types
Args:
p1: :class:`~pysimm.system.Particle` object involved in new bond
p2: :class:`~pysimm.system.Particle` object involved in new bond
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field types will be retrieved
angles: True to update new angles default=True
dihedrals: True to update new dihedrals default=True
impropers: True to update new impropers default=True
Returns:
None
"""
self.add_particle_bonding()
if p1.molecule is not p2.molecule:
if p1.molecule.particles.count < p2.molecule.particles.count:
old_molecule_tag = p1.molecule.tag
for p_ in p1.molecule.particles:
p_.molecule = p2.molecule
else:
old_molecule_tag = p2.molecule.tag
for p_ in p2.molecule.particles:
p_.molecule = p1.molecule
self.molecules.remove(old_molecule_tag)
self.add_bond(p1, p2, f)
if angles or dihedrals or impropers:
for p in p1.bonded_to:
if angles:
if p is not p2:
self.add_angle(p, p1, p2, f)
if dihedrals:
for pb in p.bonded_to:
if pb is not p1 and p is not p2:
self.add_dihedral(pb, p, p1, p2, f)
for p in p2.bonded_to:
if angles:
if p is not p1:
self.add_angle(p1, p2, p, f)
if dihedrals:
for pb in p.bonded_to:
if pb is not p2 and p is not p1:
self.add_dihedral(p1, p2, p, pb, f)
if dihedrals:
for pb1 in p1.bonded_to:
for pb2 in p2.bonded_to:
if pb1 is not p2 and pb2 is not p1:
self.add_dihedral(pb1, p1, p2, pb2, f)
if impropers:
if self.ff_class == '2':
for perm in permutations(p1.bonded_to, 3):
unique = True
for i in self.impropers:
if i.a is not p1:
continue
if set([i.b, i.c, i.d]) == set([perm[0], perm[1],
perm[2]]):
unique = False
break
if unique:
self.add_improper(p1, perm[0], perm[1], perm[2], f)
for perm in permutations(p2.bonded_to, 3):
unique = True
for i in self.impropers:
if i.a is not p2:
continue
if set([i.b, i.c, i.d]) == set([perm[0], perm[1],
perm[2]]):
unique = False
break
if unique:
self.add_improper(p2, perm[0], perm[1], perm[2], f)
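    # Illustrative sketch: form a crosslink between two reactive particles; their
    # molecules are merged and new angle/dihedral/improper terms are typed from the
    # placeholder forcefield object ff.
    #
    #     s.make_new_bonds(p1=head_atom, p2=tail_atom, f=ff)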
def add_bond(self, a=None, b=None, f=None):
"""pysimm.system.System.add_bond
Add :class:`~pysimm.system.Bond` to system between two particles
Args:
a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Bond`
b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Bond`
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is b:
return
a_name = a.type.eq_bond or a.type.name
b_name = b.type.eq_bond or b.type.name
btype = self.bond_types.get('%s,%s' % (a_name, b_name))
if not btype and f:
btype = f.bond_types.get('%s,%s' % (a_name, b_name))
if btype:
bt = btype[0].copy()
self.bond_types.add(bt)
btype = self.bond_types.get('%s,%s' % (a_name, b_name))
if btype:
new_b = Bond(type=btype[0], a=a, b=b)
self.bonds.add(new_b)
if a.bonded_to is None or b.bonded_to is None:
self.add_particle_bonding()
if a.bonded_to and b not in a.bonded_to:
a.bonded_to.add(b)
if b.bonded_to and a not in b.bonded_to:
b.bonded_to.add(a)
else:
error_print('error: system does not contain bond type named %s,%s '
'or could not find type in forcefield supplied'
% (a_name, b_name))
return
def add_angle(self, a=None, b=None, c=None, f=None):
"""pysimm.system.System.add_angle
Add :class:`~pysimm.system.Angle` to system between three particles
Args:
a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Angle`
b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Angle` (middle particle)
c: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Angle`
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is c:
return
a_name = a.type.eq_angle or a.type.name
b_name = b.type.eq_angle or b.type.name
c_name = c.type.eq_angle or c.type.name
atype = self.angle_types.get(
'%s,%s,%s' % (a_name, b_name, c_name),
item_wildcard=None
)
if not atype and f:
atype = self.angle_types.get(
'%s,%s,%s' % (a_name, b_name, c_name)
)
atype.extend(
f.angle_types.get(
'%s,%s,%s' % (a_name, b_name, c_name)
)
)
atype = sorted(atype, key=lambda x: x.name.count('X'))
if atype:
if not self.angle_types.get(atype[0].name, item_wildcard=None):
atype = self.angle_types.add(atype[0].copy())
else:
atype = self.angle_types.get(atype[0].name, item_wildcard=None)[0]
elif atype:
atype = atype[0]
if atype:
self.angles.add(Angle(type=atype, a=a, b=b, c=c))
else:
error_print('error: system does not contain angle type named '
'%s,%s,%s or could not find type in forcefield supplied'
% (a_name, b_name, c_name))
return
def add_dihedral(self, a=None, b=None, c=None, d=None, f=None):
"""pysimm.system.System.add_dihedral
Add :class:`~pysimm.system.Dihedral` to system between four particles
Args:
a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral`
b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral` (middle particle)
c: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral` (middle particle)
d: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral`
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is c or b is d:
return
a_name = a.type.eq_dihedral or a.type.name
b_name = b.type.eq_dihedral or b.type.name
c_name = c.type.eq_dihedral or c.type.name
d_name = d.type.eq_dihedral or d.type.name
dtype = self.dihedral_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name),
item_wildcard=None
)
if not dtype and f:
dtype = self.dihedral_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name)
)
dtype.extend(
f.dihedral_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name)
)
)
dtype = sorted(dtype, key=lambda x: x.name.count('X'))
if dtype:
if not self.dihedral_types.get(dtype[0].name, item_wildcard=None):
dtype = self.dihedral_types.add(dtype[0].copy())
else:
dtype = self.dihedral_types.get(dtype[0].name, item_wildcard=None)[0]
elif dtype:
dtype = dtype[0]
if dtype:
self.dihedrals.add(Dihedral(type=dtype, a=a, b=b, c=c, d=d))
else:
error_print('error: system does not contain dihedral type named '
'%s,%s,%s,%s or could not find type in forcefield '
'supplied' % (a_name, b_name,
c_name, d_name))
error_print('tags: %s %s %s %s' % (a.tag, b.tag, c.tag, d.tag))
return
def add_improper(self, a=None, b=None, c=None, d=None, f=None):
"""pysimm.system.System.add_improper
Add :class:`~pysimm.system.Improper` to system between four particles
Args:
            a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper` (middle particle)
            b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper`
            c: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper`
            d: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper`
            f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is b or a is c or a is d:
return
a_name = a.type.eq_improper or a.type.name
b_name = b.type.eq_improper or b.type.name
c_name = c.type.eq_improper or c.type.name
d_name = d.type.eq_improper or d.type.name
itype = self.improper_types.get('%s,%s,%s,%s'
% (a_name, b_name,
c_name, d_name),
improper_type=True,
item_wildcard=None)
if not itype and f:
itype = self.improper_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name),
improper_type=True
)
itype.extend(
f.improper_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name),
improper_type=True
)
)
itype = sorted(itype, key=lambda x: x.name.count('X'))
if itype:
if not self.improper_types.get(itype[0].name, item_wildcard=None, improper_type=True):
itype = self.improper_types.add(itype[0].copy())
else:
itype = self.improper_types.get(itype[0].name, item_wildcard=None, improper_type=True)[0]
elif itype:
itype = itype[0]
if itype:
self.impropers.add(Improper(type=itype, a=a, b=b, c=c, d=d))
else:
return
def check_forcefield(self):
"""pysimm.system.System.check_forcefield
Iterates through particles and prints the following:
tag
type name
type element
type description
bonded elements
Args:
None
Returns:
None
"""
if not self.objectified:
self.objectify()
for p in self.particles:
p.bond_elements = [x.a.type.elem if p is x.b else
x.b.type.elem for x in p.bonds]
p.nbonds = len(p.bond_elements)
print(p.tag, p.type.name, p.type.elem, p.type.desc, p.bond_elements)
def apply_forcefield(self, f, charges='default', set_box=True, box_padding=10,
update_ptypes=False, skip_ptypes=False):
"""pysimm.system.System.apply_forcefield
Applies force field data to :class:`~pysimm.system.System` based on typing rules defined in :class:`~pysimm.forcefield.Forcefield` object f
Args:
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
charges: type of charges to be applied default='default'
set_box: Update simulation box information based on particle positions default=True
box_padding: Add padding to simulation box if updating dimensions default=10 (Angstroms)
update_ptypes: If True, update particle types based on current :class:`~pysimm.system.ParticleType` names default=False
skip_ptypes: if True, do not change particle types
Returns:
None
"""
self.ff_class = f.ff_class
self.forcefield = f.name
if update_ptypes:
self.update_particle_types_from_forcefield(f)
skip_ptypes = True
if not skip_ptypes:
f.assign_ptypes(self)
if self.bonds.count > 0:
f.assign_btypes(self)
f.assign_atypes(self)
f.assign_dtypes(self)
f.assign_itypes(self)
if charges:
f.assign_charges(self, charges=charges)
if set_box:
self.set_box(box_padding, center=False)
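# Usage sketch (editor's illustration; the forcefield class and charge keyword
# shown are assumptions): typing a freshly built system with GAFF2 and
# Gasteiger charges might look like:
#     from pysimm import forcefield
#     s.apply_forcefield(forcefield.Gaff2(), charges='gasteiger')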
def apply_charges(self, f, charges='default'):
"""pysimm.system.System.apply_charges
Applies charges derived using method provided by user. Defaults to 'default'. Calls :func:`~pysimm.forcefield.Forcefield.assign_charges` method of forcefield object provided.
Args:
f: :class:`~pysimm.forcefield.Forcefield` object
charges: type of charges to be applied default='default'
Returns:
None
"""
f.assign_charges(self, charges=charges)
def write_lammps_mol(self, out_data):
"""pysimm.system.System.write_lammps_mol
Write :class:`~pysimm.system.System` data formatted as LAMMPS molecule template
Args:
out_data: where to write data, file name or 'string'
Returns:
None, or the data file contents as a string if out_data='string'
"""
if out_data == 'string':
out_file = StringIO()
else:
out_file = open(out_data, 'w+')
self.set_mass()
self.set_cog()
out_file.write('%s\n\n' % self.name)
out_file.write('%s atoms\n' % self.particles.count)
out_file.write('%s bonds\n' % self.bonds.count)
out_file.write('%s angles\n' % self.angles.count)
out_file.write('%s dihedrals\n' % self.dihedrals.count)
out_file.write('%s impropers\n' % self.impropers.count)
if self.particles.count > 0:
out_file.write('Coords\n\n')
for p in self.particles:
out_file.write('{} {} {} {}\n'.format(p.tag, p.x, p.y, p.z))
out_file.write('\n')
if self.particles.count > 0:
out_file.write('Types\n\n')
for p in self.particles:
out_file.write('{} {}\n'.format(p.tag, p.type.tag))
out_file.write('\n')
if self.particles.count > 0:
out_file.write('Charges\n\n')
for p in self.particles:
out_file.write('{} {}\n'.format(p.tag, p.charge))
out_file.write('\n')
if self.bonds.count > 0:
out_file.write('Bonds\n\n')
for b in self.bonds:
out_file.write('{} {} {} {}\n'.format(b.tag, b.type.tag, b.a.tag, b.b.tag))
out_file.write('\n')
if self.angles.count > 0:
out_file.write('Angles\n\n')
for a in self.angles:
out_file.write('{} {} {} {} {}\n'.format(a.tag, a.type.tag, a.a.tag, a.b.tag, a.c.tag))
out_file.write('\n')
if self.dihedrals.count > 0:
out_file.write('Dihedrals\n\n')
for d in self.dihedrals:
out_file.write('{} {} {} {} {} {}\n'.format(d.tag, d.type.tag, d.a.tag, d.b.tag, d.c.tag, d.d.tag))
out_file.write('\n')
if self.impropers.count > 0:
out_file.write('Impropers\n\n')
for i in self.impropers:
out_file.write('{} {} {} {} {} {}\n'.format(i.tag, i.type.tag, i.a.tag, i.b.tag, i.c.tag, i.d.tag))
if out_data == 'string':
s = out_file.getvalue()
out_file.close()
return s
else:
out_file.close()
def write_lammps(self, out_data, **kwargs):
"""pysimm.system.System.write_lammps
Write :class:`~pysimm.system.System` data formatted for LAMMPS
Args:
out_data: where to write data, file name or 'string'
Returns:
None, or the data file contents as a string if out_data='string'
"""
empty = kwargs.get('empty')
pair_style = kwargs.get('pair_style', self.pair_style)
bond_style = kwargs.get('bond_style', self.bond_style)
angle_style = kwargs.get('angle_style', self.angle_style)
dihedral_style = kwargs.get('dihedral_style', self.dihedral_style)
improper_style = kwargs.get('improper_style', self.improper_style)
if out_data == 'string':
out_file = StringIO()
else:
out_file = open(out_data, 'w+')
if empty:
out_file.write('%s\n\n' % self.name)
out_file.write('%s atoms\n' % 0)
out_file.write('%s bonds\n' % 0)
out_file.write('%s angles\n' % 0)
out_file.write('%s dihedrals\n' % 0)
out_file.write('%s impropers\n' % 0)
else:
out_file.write('%s\n\n' % self.name)
out_file.write('%s atoms\n' % self.particles.count)
out_file.write('%s bonds\n' % self.bonds.count)
out_file.write('%s angles\n' % self.angles.count)
out_file.write('%s dihedrals\n' % self.dihedrals.count)
out_file.write('%s impropers\n' % self.impropers.count)
out_file.write('\n')
out_file.write('%s atom types\n' % self.particle_types.count)
if self.bond_types.count > 0:
out_file.write('%s bond types\n' % self.bond_types.count)
if self.angle_types.count > 0:
out_file.write('%s angle types\n' % self.angle_types.count)
if self.dihedral_types.count > 0:
out_file.write('%s dihedral types\n' % self.dihedral_types.count)
if self.improper_types.count > 0:
out_file.write('%s improper types\n' % self.improper_types.count)
out_file.write('\n')
out_file.write('%f %f xlo xhi\n' % (self.dim.xlo, self.dim.xhi))
out_file.write('%f %f ylo yhi\n' % (self.dim.ylo, self.dim.yhi))
out_file.write('%f %f zlo zhi\n' % (self.dim.zlo, self.dim.zhi))
out_file.write('\n')
if self.particle_types.count > 0:
out_file.write('Masses\n\n')
for pt in self.particle_types:
out_file.write(pt.write_lammps('mass'))
out_file.write('\n')
if self.write_coeffs and self.particle_types.count > 0:
out_file.write('Pair Coeffs\n\n')
for pt in self.particle_types:
out_file.write(pt.write_lammps(pair_style))
out_file.write('\n')
if self.write_coeffs and self.bond_types.count > 0:
out_file.write('Bond Coeffs\n\n')
for b in self.bond_types:
out_file.write(b.write_lammps(bond_style))
out_file.write('\n')
if self.write_coeffs and self.angle_types.count > 0:
out_file.write('Angle Coeffs\n\n')
for a in self.angle_types:
out_file.write(a.write_lammps(angle_style))
out_file.write('\n')
if self.write_coeffs and (self.angle_types.count > 0 and (self.ff_class == '2' or
angle_style == 'class2')):
out_file.write('BondBond Coeffs\n\n')
for a in self.angle_types:
out_file.write(a.write_lammps(angle_style, cross_term='BondBond'))
out_file.write('\n')
out_file.write('BondAngle Coeffs\n\n')
for a in self.angle_types:
out_file.write(a.write_lammps(angle_style, cross_term='BondAngle'))
out_file.write('\n')
if self.write_coeffs and self.dihedral_types.count > 0:
out_file.write('Dihedral Coeffs\n\n')
for dt in self.dihedral_types:
out_file.write(dt.write_lammps(dihedral_style))
out_file.write('\n')
if self.write_coeffs and self.dihedral_types.count > 0 and (self.ff_class == '2' or
dihedral_style == 'class2'):
out_file.write('MiddleBondTorsion Coeffs\n\n')
for d in self.dihedral_types:
out_file.write(d.write_lammps(dihedral_style, cross_term='MiddleBond'))
out_file.write('\n')
out_file.write('EndBondTorsion Coeffs\n\n')
for d in self.dihedral_types:
out_file.write(d.write_lammps(dihedral_style, cross_term='EndBond'))
out_file.write('\n')
out_file.write('AngleTorsion Coeffs\n\n')
for d in self.dihedral_types:
out_file.write(d.write_lammps(dihedral_style, cross_term='Angle'))
out_file.write('\n')
out_file.write('AngleAngleTorsion Coeffs\n\n')
for d in self.dihedral_types:
out_file.write(d.write_lammps(dihedral_style, cross_term='AngleAngle'))
out_file.write('\n')
out_file.write('BondBond13 Coeffs\n\n')
for d in self.dihedral_types:
out_file.write(d.write_lammps(dihedral_style, cross_term='BondBond13'))
out_file.write('\n')
if self.write_coeffs and self.improper_types.count > 0:
out_file.write('Improper Coeffs\n\n')
for i in self.improper_types:
out_file.write(i.write_lammps(improper_style))
out_file.write('\n')
if self.write_coeffs and self.improper_types.count > 0 and (self.ff_class == '2' or
improper_style == 'class2'):
out_file.write('AngleAngle Coeffs\n\n')
for i in self.improper_types:
out_file.write(i.write_lammps(improper_style, cross_term='AngleAngle'))
out_file.write('\n')
if self.particles.count > 0 and not empty:
out_file.write('Atoms\n\n')
for p in self.particles:
if not p.molecule:
p.molecule = Item()
p.molecule.tag = 1
if not p.charge:
p.charge = 0
if isinstance(p.molecule, int):
out_file.write('%4d\t%d\t%d\t%s\t%s\t%s\t%s\n'
% (p.tag, p.molecule, p.type.tag, p.charge,
p.x, p.y, p.z))
else:
out_file.write('%4d\t%d\t%d\t%s\t%s\t%s\t%s\n'
% (p.tag, p.molecule.tag, p.type.tag, p.charge,
p.x, p.y, p.z))
out_file.write('\n')
out_file.write('Velocities\n\n')
for p in self.particles:
if not p.vx:
p.vx = 0.
if not p.vy:
p.vy = 0.
if not p.vz:
p.vz = 0.
out_file.write('%4d\t%s\t%s\t%s\n' % (p.tag, p.vx, p.vy, p.vz))
out_file.write('\n')
if self.bonds.count > 0 and not empty:
out_file.write('Bonds\n\n')
for b in self.bonds:
out_file.write('%4d\t%d\t%d\t%d\n'
% (b.tag, b.type.tag, b.a.tag, b.b.tag))
out_file.write('\n')
if self.angles.count > 0 and not empty:
out_file.write('Angles\n\n')
for a in self.angles:
out_file.write('%4d\t%d\t%d\t%d\t%d\n'
% (a.tag, a.type.tag, a.a.tag, a.b.tag, a.c.tag))
out_file.write('\n')
if self.dihedrals.count > 0 and not empty:
out_file.write('Dihedrals\n\n')
for d in self.dihedrals:
out_file.write('%4d\t%d\t%d\t%d\t%d\t%d\n'
% (d.tag, d.type.tag,
d.a.tag, d.b.tag, d.c.tag, d.d.tag))
out_file.write('\n')
if self.impropers.count > 0 and not empty:
out_file.write('Impropers\n\n')
for i in self.impropers:
if self.ff_class == '2' or self.improper_style == 'class2':
out_file.write('%4d\t%d\t%d\t%d\t%d\t%d\n'
% (i.tag, i.type.tag,
i.b.tag, i.a.tag, i.c.tag, i.d.tag))
else:
out_file.write('%4d\t%d\t%d\t%d\t%d\t%d\n'
% (i.tag, i.type.tag,
i.a.tag, i.b.tag, i.c.tag, i.d.tag))
out_file.write('\n')
if out_data == 'string':
s = out_file.getvalue()
out_file.close()
return s
else:
out_file.close()
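# Usage sketch (editor's illustration, not from the original source): writing
# LAMMPS data to disk or capturing it as a string, assuming `s` is a fully
# typed System:
#     s.write_lammps('data.lmps')
#     data_str = s.write_lammps('string')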
def write_xyz(self, outfile='data.xyz', **kwargs):
"""pysimm.system.System.write_xyz
Write :class:`~pysimm.system.System` data in xyz format
Args:
outfile: where to write data, file name or 'string'
Returns:
None, or the data file contents as a string if outfile='string'
"""
elem = kwargs.get('elem', True)
append = kwargs.get('append')
if outfile == 'string':
out = StringIO()
else:
if append:
out = open(outfile, 'a')
else:
out = open(outfile, 'w')
out.write('%s\n' % self.particles.count)
out.write('xyz file written from pySIMM system module\n')
for p in self.particles:
if elem and p.type and p.type.elem is not None:
out.write('%s %s %s %s\n' % (p.type.elem, p.x, p.y, p.z))
elif elem and p.elem is not None:
out.write('%s %s %s %s\n' % (p.elem, p.x, p.y, p.z))
else:
out.write('%s %s %s %s\n' % (p.type.tag, p.x, p.y, p.z))
if outfile == 'string':
s = out.getvalue()
out.close()
return s
else:
out.close()
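# Usage sketch (editor's illustration): appending successive frames of a
# System `s` to a single xyz trajectory file:
#     s.write_xyz('traj.xyz', append=True)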
def write_chemdoodle_json(self, outfile, **kwargs):
"""pysimm.system.System.write_chemdoodle_json
Write :class:`~pysimm.system.System` data in chemdoodle json format
Args:
outfile: where to write data, file name or 'string'
Returns:
None, or the data file contents as a string if outfile='string'
"""
atoms = []
bonds = []
for p in self.particles:
if p.type and p.type.elem:
atoms.append({"x": p.x, "y": p.y, "z": p.z, "l": p.type.elem, "i": p.type.name, "c": p.charge})
elif p.elem and p.type:
atoms.append({"x": p.x, "y": p.y, "z": p.z, "l": p.elem, "i": p.type.name, "c": p.charge})
elif p.elem:
atoms.append({"x": p.x, "y": p.y, "z": p.z, "l": p.elem})
else:
atoms.append({"x": p.x, "y": p.y, "z": p.z, "i": p.type.name, "c": p.charge})
for b in self.bonds:
if b.order:
bonds.append({"b": b.a.tag-1, "e": b.b.tag-1, "o": b.order})
else:
bonds.append({"b": b.a.tag-1, "e": b.b.tag-1})
j = {"a": atoms, "b": bonds}
if outfile == 'string':
out = StringIO()
else:
out = open(outfile, 'w+')
out.write(json.dumps(j))
if outfile == 'string':
s = out.getvalue()
out.close()
return s
else:
out.close()
def write_mol(self, outfile='data.mol'):
"""pysimm.system.System.write_mol
Write :class:`~pysimm.system.System` data in mol format
Args:
outfile: where to write data, file name or 'string'
Returns:
None, or the data file contents as a string if outfile='string'
"""
if outfile == 'string':
out = StringIO()
else:
out = open(outfile, 'w+')
out.write('system\n')
out.write('written using pySIMM system module\n\n')
out.write('%s\t%s\n' % (self.particles.count, self.bonds.count))
for p in self.particles:
if not p.charge:
p.charge = 0.0
if p.type and p.type.elem:
out.write('%10.4f%10.4f%10.4f %s 0 %10.4f\n'
% (p.x, p.y, p.z, '{0: >3}'.format(p.type.elem),
p.charge))
elif p.elem:
out.write('%10.4f%10.4f%10.4f %s 0 %10.4f\n'
% (p.x, p.y, p.z, '{0: >3}'.format(p.elem),
p.charge))
elif p.type:
out.write('%10.4f%10.4f%10.4f %s 0 %10.4f\n'
% (p.x, p.y, p.z, '{0: >3}'.format(p.type.tag),
p.charge))
for b in self.bonds:
if b.order:
out.write('%s\t%s\t%s\t%s\t%s\t%s\n'
% (b.a.tag, b.b.tag, b.order, 0, 0, 0))
else:
out.write('%s\t%s\t%s\t%s\t%s\t%s\n'
% (b.a.tag, b.b.tag, 1, 0, 0, 0))
out.write('M END')
if outfile == 'string':
s = out.getvalue()
out.close()
return s
else:
out.close()
def write_pdb(self, outfile='data.pdb', type_names=True):
"""pysimm.system.System.write_pdb
Write :class:`~pysimm.system.System` data in pdb format
Args:
outfile: where to write data, file name or 'string'
Returns:
None, or the data file contents as a string if outfile='string'
"""
if outfile == 'string':
out = StringIO()
else:
out = open(outfile, 'w+')
out.write('{:<10}pdb written using pySIMM system module\n'
.format('HEADER'))
for m in self.molecules:
for p in sorted(m.particles, key=lambda x: x.tag):
if p.type:
out.write(
'{:<6}{:>5} {:>4} RES {:4} {: 8.3f}{: 8.3f}{: 8.3f}{:>22}{:>2}\n'.format(
'ATOM', p.tag, p.type.name[0:4] if type_names else p.type.elem,
p.molecule.tag, p.x, p.y, p.z, '', p.type.elem
)
)
elif p.elem:
out.write(
'{:<6}{:>5} {:>4} RES {:4} {: 8.3f}{: 8.3f}{: 8.3f}{:>22}{:>2}\n'.format(
'ATOM', p.tag, p.elem, p.molecule.tag,
p.x, p.y, p.z, '', p.elem
)
)
out.write('TER\n')
for p in self.particles:
if p.bonds:
out.write('{:<6}{:>5}'
.format('CONECT', p.tag))
for t in sorted([x.a.tag if p is x.b else x.b.tag for x in
p.bonds]):
out.write('{:>5}'.format(t))
out.write('\n')
if outfile == 'string':
s = out.getvalue()
out.close()
return s
else:
out.close()
def write_yaml(self, file_):
"""pysimm.system.System.write_yaml
Write :class:`~pysimm.system.System` data in yaml format (serialized with json, which is a valid subset of yaml)
Args:
outfile: file name to write data
Returns:
None
"""
n = self.copy()
s = vars(n)
for k, v in s.items():
if isinstance(v, ItemContainer):
s[k] = vars(v)
for k_, v_ in s[k].items():
if k_ == '_dict':
for t, i in v_.items():
s[k][k_][t] = vars(i)
for key, value in s[k][k_][t].items():
if isinstance(value, ItemContainer) or (isinstance(value, list) and
value and isinstance(value[0], Item)):
s[k][k_][t][key] = [x.tag for x in value]
elif isinstance(value, Item) or isinstance(value, System) and value.tag:
s[k][k_][t][key] = value.tag
elif isinstance(v, Item):
s[k] = vars(v)
else:
s[k] = v
if file_ == 'string':
f = StringIO()
f.write(json.dumps(s, indent=4, separators=(',', ': ')))
yaml_ = f.getvalue()
f.close()
return yaml_
with open(file_, 'w') as f:
f.write(json.dumps(s, indent=4, separators=(',', ': ')))
def write_cssr(self, outfile='data.cssr', **kwargs):
"""pysimm.system.System.write_cssr
Write :class:`~pysimm.system.System` data in cssr format
file format: line, format, contents
1: 38X, 3F8.3 : - length of the three cell parameters (a, b, and c) in angstroms.
2: 21X, 3F8.3, 4X, 'SPGR =', I3, 1X, A11 : - a, b, g in degrees, space group number, space group name.
3: 2I4, 1X, A60 : - Number of atoms stored, coordinate system flag (0=fractional, 1=orthogonal coordinates in Angstrom), first title.
4: A53 : - A line of text that can be used to describe the file.
5-: I4, 1X, A4, 2X, 3(F9.5,1X), 8I4, 1X, F7.3 : - Atom serial number, atom name, x, y, z coordinates, bonding connectivities (max 8), charge.
Note: The atom name is a concatenation of the element symbol and the atom serial number.
Args:
outfile: where to write data, file name or 'string'
frac: 0 for using fractional coordinates
aname: 0 for using element as atom name; else using atom type name
Returns:
None, or the data file contents as a string if outfile='string'
"""
if outfile == 'string':
out = StringIO()
else:
out = open(outfile, 'w+')
frac = kwargs.get('frac', 1)
aname = kwargs.get('aname', 0)
out.write('%s%8.3f%8.3f%8.3f\n' % (38*' ', self.dim.dx, self.dim.dy, self.dim.dz))
out.write('%s%8.3f%8.3f%8.3f SPGR= %3d %s\n' % (21*' ', 90.0, 90.0, 90.0, 1, 'P 1'))
out.write('%4d%4d %s\n' % (self.particles.count, frac, 'CSSR written using pySIMM system module'))
out.write('%s\n' % self.name)
for p in self.particles:
if not p.charge:
p.charge = 0.0
if p.type:
if aname == 0:
if p.type.elem:
name = p.type.elem
elif p.elem:
name = p.elem
else:
name = p.type.tag
else:
if p.type.name:
name = p.type.name
else:
name = p.type.tag
else:
name = p.tag
if frac == 0:
x = p.x/self.dim.dx
y = p.y/self.dim.dy
z = p.z/self.dim.dz
else:
x = p.x
y = p.y
z = p.z
bonds = ''
n_bonds = 0
for b in p.bonds:
if p is b.a:
bonds += ' {:4d}'.format(b.b.tag)
else:
bonds += ' {:4d}'.format(b.a.tag)
n_bonds += 1
for i in range(n_bonds+1, 9):
bonds = bonds + ' {:4d}'.format(0)
out.write('%4d %4s %9.5f %9.5f %9.5f %s %7.3f\n'
% (p.tag, name, x, y, z, bonds, p.charge))
out.write('\n')
if outfile == 'string':
s = out.getvalue()
out.close()
return s
else:
out.close()
def consolidate_types(self):
"""pysimm.system.System.consolidate_types
Removes duplicate types and reassigns references
Args:
None
Returns:
None
"""
for pt in self.particle_types:
for dup in self.particle_types:
if pt is not dup and pt.name == dup.name:
for p in self.particles:
if p.type == dup:
p.type = pt
self.particle_types.remove(dup.tag)
for bt in self.bond_types:
for dup in self.bond_types:
if bt is not dup and bt.name == dup.name:
for b in self.bonds:
if b.type == dup:
b.type = bt
self.bond_types.remove(dup.tag)
for at in self.angle_types:
for dup in self.angle_types:
if at is not dup and at.name == dup.name:
for a in self.angles:
if a.type == dup:
a.type = at
self.angle_types.remove(dup.tag)
for dt in self.dihedral_types:
for dup in self.dihedral_types:
if dt is not dup and dt.name == dup.name:
for d in self.dihedrals:
if d.type == dup:
d.type = dt
self.dihedral_types.remove(dup.tag)
for it in self.improper_types:
for dup in self.improper_types:
if it is not dup and it.name == dup.name:
for i in self.impropers:
if i.type == dup:
i.type = it
self.improper_types.remove(dup.tag)
def set_cog(self):
"""pysimm.system.System.set_cog
Calculate center of gravity of :class:`~pysimm.system.System` and assign to :class:`~pysimm.system.System`.cog
Args:
None
Returns:
None
"""
self.cog = [0, 0, 0]
for p in self.particles:
self.cog[0] += p.x
self.cog[1] += p.y
self.cog[2] += p.z
if self.particles.count:
self.cog = [c / self.particles.count for c in self.cog]
def shift_particles(self, shiftx, shifty, shiftz):
"""pysimm.system.System.shift_particles
Shifts all particles by shiftx, shifty, shiftz. Recalculates cog.
Args:
shiftx: distance to shift particles in x direction
shifty: distance to shift particles in y direction
shiftz: distance to shift particles in z direction
Returns:
None
"""
for p in self.particles:
p.translate(shiftx, shifty, shiftz)
self.set_cog()
def center(self, what='particles', at=[0, 0, 0], move_both=True):
"""pysimm.system.System.center
Centers particles center of geometry or simulation box at given coordinate. A vector is defined based on the current coordinate for the center of either the particles or the simulation box and the "at" parameter. This shift vector is applied to the entity defined by the "what" parameter. Optionally, both the particles and the box can be shifted by the same vector.
Args:
what: what is being centered: "particles" or "box"
at: new coordinate for center of particles or box
move_both: if True, determines vector for shift defined by "what" and "at" parameters, and applies shift to both particles and box. If false, only shift what is defined by "what" parameter.
Returns:
None
"""
if what == 'particles':
self.set_cog()
move_vec = [at[n] - self.cog[n] for n in range(3)]
self.shift_particles(*move_vec)
if move_both:
self.dim.translate(*move_vec)
elif what == 'box':
self.dim.size()
box_center = [self.dim.xlo+self.dim.dx/2, self.dim.ylo+self.dim.dy/2, self.dim.zlo+self.dim.dz/2]
move_vec = [at[n] - box_center[n] for n in range(3)]
self.dim.translate(*move_vec)
if move_both:
self.shift_particles(*move_vec)
else:
error_print('can only choose to center "particles" or "box"')
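# Usage sketch (editor's illustration): recentering the particles' center of
# geometry at the origin while dragging the simulation box along with them:
#     s.center(what='particles', at=[0, 0, 0], move_both=True)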
def center_system(self):
"""pysimm.system.System.center_system
DEPRECATED: Use :class:`~pysimm.system.System`.center('box', [0, 0, 0], True) instead
Args:
None
Returns:
None
"""
warning_print("DEPRECATED: Use System.center('box', [0, 0, 0], True) instead of System.center_system())")
self.center('box', [0, 0, 0], True)
def center_at_origin(self):
"""pysimm.system.System.center_at_origin
DEPRECATED: Use :class:`~pysimm.system.System`.center('particles', [0, 0, 0], True) instead
Args:
None
Returns:
None
"""
warning_print("DEPRECATED: Use System.center('particles', [0, 0, 0], True) instead of System.center_at_origin())")
self.center('particles', [0, 0, 0], True)
def set_mass(self):
"""pysimm.system.System.set_mass
Set total mass of particles in :class:`~pysimm.system.System`
Args:
None
Returns:
None
"""
self.mass = 0
for p in self.particles:
if p.type.mass is None:
self.mass = 0
warning_print('Some particles do not have a mass')
break
self.mass += p.type.mass
def set_volume(self):
"""pysimm.system.System.set_volume
Set volume of :class:`~pysimm.system.System` based on Dimension
Args:
None
Returns:
None
"""
if self.dim.check():
self.volume = ((self.dim.xhi - self.dim.xlo) *
(self.dim.yhi - self.dim.ylo) *
(self.dim.zhi - self.dim.zlo))
def set_density(self):
"""pysimm.system.System.set_density
Calculate density of :class:`~pysimm.system.System` from mass and volume
Args:
None
Returns:
None
"""
self.set_mass()
self.set_volume()
if self.mass and self.volume:
self.density = self.mass / 6.02e23 / self.volume * 1e24
def set_velocity(self):
"""pysimm.system.System.set_velocity
Calculate total velocity of particles in :class:`~pysimm.system.System`
Args:
None
Returns:
None
"""
self.vx = 0.0
self.vy = 0.0
self.vz = 0.0
for p in self.particles:
if p.vx is None:
p.vx = 0
self.vx += p.vx
if p.vy is None:
p.vy = 0
self.vy += p.vy
if p.vz is None:
p.vz = 0
self.vz += p.vz
def zero_velocity(self):
"""pysimm.system.System.zero_velocity
Enforce zero shift velocity in :class:`~pysimm.system.System`
Args:
None
Returns:
None
"""
self.set_velocity()
shift_x = shift_y = shift_z = 0.0
if self.vx != 0:
shift_x = self.vx / self.particles.count
if self.vy != 0:
shift_y = self.vy / self.particles.count
if self.vz != 0:
shift_z = self.vz / self.particles.count
if shift_x != 0 or shift_y != 0 or shift_z != 0:
for p in self.particles:
p.vx -= shift_x
p.vy -= shift_y
p.vz -= shift_z
self.set_velocity()
def set_box(self, padding=0., center=True):
"""pysimm.system.System.set_box
Update :class:`~pysimm.system.System`.dim with user defined padding. Used to construct a simulation box if it doesn't exist, or adjust the size of the simulation box following system modifications.
Args:
padding: add padding to all sides of box (Angstrom)
center: if True, place center of box at origin default=True
Returns:
None
"""
xmin = ymin = zmin = sys.float_info.max
xmax = ymax = zmax = -sys.float_info.max  # most negative float, so any particle coordinate exceeds it
for p in self.particles:
if p.x < xmin:
xmin = p.x
if p.x > xmax:
xmax = p.x
if p.y < ymin:
ymin = p.y
if p.y > ymax:
ymax = p.y
if p.z < zmin:
zmin = p.z
if p.z > zmax:
zmax = p.z
self.dim.xlo = xmin - padding
self.dim.xhi = xmax + padding
self.dim.ylo = ymin - padding
self.dim.yhi = ymax + padding
self.dim.zlo = zmin - padding
self.dim.zhi = zmax + padding
if center:
self.center('particles', [0, 0, 0], True)
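# Usage sketch (editor's illustration): rebuilding the box with a 10 Angstrom
# margin on every side after inserting or deleting particles:
#     s.set_box(padding=10.0, center=False)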
def set_mm_dist(self, molecules=None):
"""pysimm.system.System.set_mm_dist
Calculate molecular mass distribution (mainly for polymer systems).
Sets :class:`~pysimm.system.System`.mw, :class:`~pysimm.system.System`.mn, and :class:`~pysimm.system.System`.dispersity
Args:
molecules: :class:`~pysimm.utils.ItemContainer` of molecules to calculate distributions default='all'
Returns:
None
"""
if molecules is None or molecules == 'all':
molecules = self.molecules
for m in molecules:
m.set_mass()
self.mn = 0
self.mw = 0
for m in molecules:
self.mn += m.mass
self.mw += pow(m.mass, 2)
self.mw /= self.mn
self.mn /= molecules.count
self.dispersity = self.mw / self.mn
self.pdi = self.mw / self.mn
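# Usage sketch (editor's illustration): after polymerization, the molecular
# weight distribution of all molecules can be summarized with:
#     s.set_mm_dist()
#     print(s.mn, s.mw, s.dispersity)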
def set_frac_free_volume(self, v_void=None):
"""pysimm.system.System.set_frac_free_volume
Calculates fractional free volume from void volume and bulk density
Args:
v_void: void volume if not defined in :class:`~pysimm.system.System`.void_volume default=None
Returns:
None
"""
if not v_void and not self.void_volume:
error_print('Void volume not provided, cannot calculate fractional free volume')
return
elif not v_void:
self.set_density()
self.frac_free_volume = calc.frac_free_volume(1/self.density, self.void_volume)
elif not self.void_volume:
self.set_density()
self.frac_free_volume = calc.frac_free_volume(1/self.density, v_void)
if not self.frac_free_volume or self.frac_free_volume < 0:
self.frac_free_volume = 0.0
def visualize(self, vis_exec='vmd', **kwargs):
"""pysimm.system.System.visualize
Visualize system in third party software with given executable. Software must accept pdb or xyz as first
command line argument.
Args:
vis_exec: executable to launch visualization software default='vmd'
unwrap (optional): if True, unwrap :class:`~pysimm.system.System` first default=None
format (optional): set format default='xyz'
Returns:
None
"""
if not call:
raise PysimmError('pysimm.system.System.visualize function requires subprocess.call')
unwrap = kwargs.get('unwrap')
format = kwargs.get('format', 'xyz')
verbose_print(self.dim.dx, self.dim.xlo, self.dim.xhi)
verbose_print(self.dim.dy, self.dim.ylo, self.dim.yhi)
verbose_print(self.dim.dz, self.dim.zlo, self.dim.zhi)
if unwrap:
self.unwrap()
if format == 'xyz':
name_ = 'pysimm_temp.xyz'
self.write_xyz(name_)
elif format == 'pdb':
name_ = 'pysimm_temp.pdb'
self.write_pdb(name_)
call('%s %s' % (vis_exec, name_), shell=True)
os.remove(name_)
def viz(self, **kwargs):
self.visualize(vis_exec='vmd', unwrap=False, format='xyz', **kwargs)
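# Usage sketch (editor's illustration): a quick look at an unwrapped system in
# VMD, assuming the `vmd` executable is on the PATH:
#     s.visualize('vmd', unwrap=True, format='xyz')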
class Molecule(System):
"""pysimm.system.Molecule
Very similar to :class:`~pysimm.system.System`, but requires less information
"""
def __init__(self, **kwargs):
System.__init__(self, **kwargs)
mt = kwargs.get('tag')
if mt and isinstance(mt, int):
self.tag = mt
def read_yaml(file_, **kwargs):
"""pysimm.system.read_yaml
Interprets yaml file and creates :class:`~pysimm.system.System` object
Args:
file_: yaml file name
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(file_):
dict_ = json.loads(open(file_).read())
else:
dict_ = json.loads(file_)
s = System()
for k, v in dict_.items():
if not isinstance(v, dict):
setattr(s, k, v)
if isinstance(dict_.get('dim'), dict):
s.dim = Dimension(**dict_.get('dim'))
if isinstance(dict_.get('particle_types'), dict):
s.particle_types = ItemContainer()
for pt in dict_.get('particle_types').get('_dict').values():
s.particle_types.add(ParticleType(**pt))
if isinstance(dict_.get('bond_types'), dict):
s.bond_types = ItemContainer()
for bt in dict_.get('bond_types').get('_dict').values():
s.bond_types.add(BondType(**bt))
if isinstance(dict_.get('angle_types'), dict):
s.angle_types = ItemContainer()
for at in dict_.get('angle_types').get('_dict').values():
s.angle_types.add(AngleType(**at))
if isinstance(dict_.get('dihedral_types'), dict):
s.dihedral_types = ItemContainer()
for dt in dict_.get('dihedral_types').get('_dict').values():
s.dihedral_types.add(DihedralType(**dt))
if isinstance(dict_.get('improper_types'), dict):
s.improper_types = ItemContainer()
for it in dict_.get('improper_types').get('_dict').values():
s.improper_types.add(ImproperType(**it))
if isinstance(dict_.get('particles'), dict):
s.particles = ItemContainer()
for p in dict_.get('particles').get('_dict').values():
s.particles.add(Particle(**p))
if isinstance(dict_.get('bonds'), dict):
s.bonds = ItemContainer()
for b in dict_.get('bonds').get('_dict').values():
s.bonds.add(Bond(**b))
if isinstance(dict_.get('angles'), dict):
s.angles = ItemContainer()
for a in dict_.get('angles').get('_dict').values():
s.angles.add(Angle(**a))
if isinstance(dict_.get('dihedrals'), dict):
s.dihedrals = ItemContainer()
for d in dict_.get('dihedrals').get('_dict').values():
s.dihedrals.add(Dihedral(**d))
if isinstance(dict_.get('impropers'), dict):
s.impropers = ItemContainer()
for i in dict_.get('impropers').get('_dict').values():
s.impropers.add(Improper(**i))
if isinstance(dict_.get('molecules'), dict):
s.molecules = ItemContainer()
for m in dict_.get('molecules').get('_dict').values():
mol = Molecule()
for k, v in m.items():
if isinstance(v, list) and not v:
setattr(mol, k, ItemContainer())
else:
setattr(mol, k, v)
particles = [x for x in mol.particles]
mol.particles = ItemContainer()
for n in particles:
mol.particles.add(s.particles[n])
bonds = [x for x in mol.bonds]
mol.bonds = ItemContainer()
for n in bonds:
mol.bonds.add(s.bonds[n])
angles = [x for x in mol.angles]
mol.angles = ItemContainer()
for n in angles:
mol.angles.add(s.angles[n])
dihedrals = [x for x in mol.dihedrals]
mol.dihedrals = ItemContainer()
for n in dihedrals:
mol.dihedrals.add(s.dihedrals[n])
impropers = [x for x in mol.impropers]
mol.impropers = ItemContainer()
for n in impropers:
mol.impropers.add(s.impropers[n])
s.molecules.add(mol)
for p in s.particles:
if s.particle_types[p.type]:
p.type = s.particle_types[p.type]
if s.molecules[p.molecule]:
p.molecule = s.molecules[p.molecule]
bonds = [x for x in p.bonds]
p.bonds = ItemContainer()
for n in bonds:
p.bonds.add(s.bonds[n])
angles = [x for x in p.angles]
p.angles = ItemContainer()
for n in angles:
p.angles.add(s.angles[n])
dihedrals = [x for x in p.dihedrals]
p.dihedrals = ItemContainer()
for n in dihedrals:
p.dihedrals.add(s.dihedrals[n])
impropers = [x for x in p.impropers]
p.impropers = ItemContainer()
for n in impropers:
p.impropers.add(s.impropers[n])
for b in s.bonds:
if s.bond_types[b.type]:
b.type = s.bond_types[b.type]
b.a = s.particles[b.a]
b.b = s.particles[b.b]
for a in s.angles:
if s.angle_types[a.type]:
a.type = s.angle_types[a.type]
a.a = s.particles[a.a]
a.b = s.particles[a.b]
a.c = s.particles[a.c]
for d in s.dihedrals:
if s.dihedral_types[d.type]:
d.type = s.dihedral_types[d.type]
d.a = s.particles[d.a]
d.b = s.particles[d.b]
d.c = s.particles[d.c]
d.d = s.particles[d.d]
for i in s.impropers:
if s.improper_types[i.type]:
i.type = s.improper_types[i.type]
i.a = s.particles[i.a]
i.b = s.particles[i.b]
i.c = s.particles[i.c]
i.d = s.particles[i.d]
return s
def read_xyz(file_, **kwargs):
"""pysimm.system.read_xyz
Interprets xyz file and creates :class:`~pysimm.system.System` object
Args:
file_: xyz file name
quiet(optional): if False, print status
Returns:
:class:`~pysimm.system.System` object
"""
quiet = kwargs.get('quiet')
if os.path.isfile(file_):
debug_print('reading file')
f = open(file_)
elif isinstance(file_, str):
debug_print('reading string')
f = StringIO(file_)
s = System()
nparticles = int(next(f).strip())
name = next(f).strip()
s.name = name
for _ in range(nparticles):
elem, x, y, z = next(f).split()
x = float(x)
y = float(y)
z = float(z)
s.particles.add(Particle(elem=elem, x=x, y=y, z=z))
f.close()
for p in s.particles:
pt = s.particle_types.get(p.elem)
if pt:
p.type = pt[0]
else:
pt = ParticleType(elem=p.elem, name=p.elem)
p.type = pt
s.particle_types.add(pt)
if not quiet:
verbose_print('read %s particles' % s.particles.count)
s.set_box(padding=0.5)
return s
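# Usage sketch (editor's illustration): reading coordinates from an xyz file;
# element symbols become minimal ParticleTypes and a padded box is built:
#     s = read_xyz('molecule.xyz')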
def read_chemdoodle_json(file_, **kwargs):
"""pysimm.system.read_chemdoodle_json
Interprets ChemDoodle JSON (Java Script Object Notation) file and creates :class:`~pysimm.system.System` object
Args:
file_: json file name
quiet(optional): if False, print status
Returns:
:class:`~pysimm.system.System` object
"""
quiet = kwargs.get('quiet')
if os.path.isfile(file_):
debug_print('reading file')
f = open(file_)
elif isinstance(file_, str):
debug_print('reading string')
f = StringIO(file_)
s = System()
data = json.loads(f.read())
for a in data.get('a'):
s.particles.add(Particle(
x=a.get('x'),
y=a.get('y'),
z=a.get('z'),
charge=a.get('c'),
elem=a.get('l'),
type_name=a.get('i')
))
for b in data.get('b'):
s.bonds.add(Bond(
a=s.particles[b.get('b')+1],
b=s.particles[b.get('e')+1],
order=b.get('o')
))
return s
def read_lammps(data_file, **kwargs):
"""pysimm.system.read_lammps
Interprets LAMMPS data file and creates :class:`~pysimm.system.System` object
Args:
data_file: LAMMPS data file name
quiet(optional): if False, print status
atom_style (optional): option to let user override (understands charge, molecular, full)
pair_style (optional): option to let user override
bond_style (optional): option to let user override
angle_style (optional): option to let user override
dihedral_style (optional): option to let user override
improper_style (optional): option to let user override
set_types (optional): if True, objectify default=True
name (optional): provide name for system
Returns:
:class:`~pysimm.system.System` object
"""
atom_style = kwargs.get('atom_style')
pair_style = kwargs.get('pair_style')
bond_style = kwargs.get('bond_style')
angle_style = kwargs.get('angle_style')
dihedral_style = kwargs.get('dihedral_style')
improper_style = kwargs.get('improper_style')
set_types = kwargs.get('set_types', True)
name = kwargs.get('name')
quiet = kwargs.get('quiet')
if os.path.isfile(data_file):
if not quiet:
verbose_print('reading lammps data file "%s"' % data_file)
f = open(data_file)
elif isinstance(data_file, str):
if not quiet:
verbose_print('reading lammps data file from string')
f = StringIO(data_file)
else:
raise PysimmError('pysimm.system.read_lammps requires either '
'file or string as first argument')
if name:
if not quiet:
verbose_print('creating pysimm.system.System object with name %s'
% name)
s = System(name=name)
else:
s = System(name=next(f).strip())
nparticles = nparticle_types = nbonds = nbond_types = 0
nangles = nangle_types = ndihedrals = ndihedral_types = 0
nimpropers = nimproper_types = 0
for line in f:
line = line.split()
if len(line) > 1 and line[1] == 'atoms':
nparticles = int(line[0])
elif len(line) > 1 and line[1] == 'atom':
nparticle_types = int(line[0])
elif len(line) > 1 and line[1] == 'bonds':
nbonds = int(line[0])
elif len(line) > 1 and line[1] == 'bond':
nbond_types = int(line[0])
elif len(line) > 1 and line[1] == 'angles':
nangles = int(line[0])
elif len(line) > 1 and line[1] == 'angle':
nangle_types = int(line[0])
elif len(line) > 1 and line[1] == 'dihedrals':
ndihedrals = int(line[0])
elif len(line) > 1 and line[1] == 'dihedral':
ndihedral_types = int(line[0])
elif len(line) > 1 and line[1] == 'impropers':
nimpropers = int(line[0])
elif len(line) > 1 and line[1] == 'improper':
nimproper_types = int(line[0])
elif len(line) > 3 and line[2] == 'xlo':
s.dim.xlo = float(line[0])
s.dim.xhi = float(line[1])
elif len(line) > 3 and line[2] == 'ylo':
s.dim.ylo = float(line[0])
s.dim.yhi = float(line[1])
elif len(line) > 3 and line[2] == 'zlo':
s.dim.zlo = float(line[0])
s.dim.zhi = float(line[1])
elif len(line) > 0 and line[0] == 'Masses':
next(f)
for i in range(nparticle_types):
pt = ParticleType.parse_lammps(next(f), 'mass')
if s.particle_types[pt.tag]:
s.particle_types[pt.tag].mass = pt.mass
else:
s.particle_types.add(pt)
if not quiet:
verbose_print('read masses for %s ParticleTypes'
% s.particle_types.count)
elif len(line) > 0 and line[0] == 'Pair':
if '#' in line and not pair_style:
line = ' '.join(line).split('#')
pair_style = line[1].strip()
next(f)
for i in range(nparticle_types):
line = next(f)
if not pair_style:
warning_print('unknown pair style - inferring from number of parameters (2=lj 3=buck 4=charmm)')
pair_style = ParticleType.guess_style(
len(line.split('#')[0].split()[1:])
)
if pair_style:
pt = ParticleType.parse_lammps(line, pair_style)
if s.particle_types[pt.tag]:
s.particle_types[pt.tag].set(**vars(pt))
else:
s.particle_types.add(pt)
verbose_print('read "%s" nonbonded parameters '
'for %s ParticleTypes'
% (pair_style, s.particle_types.count))
elif len(line) > 0 and line[0] == 'Bond':
next(f)
for i in range(nbond_types):
line = next(f)
if not bond_style:
warning_print('unknown bond_style - inferring from number of parameters (2=harmonic 4=class2)')
bond_style = BondType.guess_style(
len(line.split('#')[0].split()[1:])
)
if bond_style:
s.bond_types.add(BondType.parse_lammps(line, bond_style))
verbose_print('read "%s" bond parameters '
'for %s BondTypes'
% (bond_style, s.bond_types.count))
elif len(line) > 0 and line[0] == 'Angle':
next(f)
for i in range(nangle_types):
line = next(f)
if not angle_style:
warning_print('unknown angle_style - inferring from number of parameters (2=harmonic)')
angle_style = AngleType.guess_style(
len(line.split('#')[0].split()[1:])
)
if angle_style:
s.angle_types.add(AngleType.parse_lammps(line, angle_style))
verbose_print('read "%s" angle parameters '
'for %s AngleTypes'
% (angle_style, s.angle_types.count))
elif len(line) > 0 and line[0] == 'BondBond':
next(f)
for i in range(nangle_types):
line = next(f).strip().split()
tag = int(line[0])
s.angle_types[tag].m = float(line[1])
s.angle_types[tag].r1 = float(line[2])
s.angle_types[tag].r2 = float(line[3])
verbose_print('read "%s" angle (bond-bond) '
'parameters for %s AngleTypes'
% (angle_style, s.angle_types.count))
elif len(line) > 0 and line[0] == 'BondAngle':
next(f)
for i in range(nangle_types):
line = next(f).strip().split()
tag = int(line[0])
s.angle_types[tag].n1 = float(line[1])
s.angle_types[tag].n2 = float(line[2])
s.angle_types[tag].r1 = float(line[3])
s.angle_types[tag].r2 = float(line[4])
if angle_style:
verbose_print('read "%s" angle (bond-angle) '
'parameters for %s AngleTypes'
% (angle_style, s.angle_types.count))
elif len(line) > 0 and line[0] == 'Dihedral':
next(f)
for i in range(ndihedral_types):
line = next(f)
if not dihedral_style:
warning_print('unknown dihedral_style - inferring from number of parameters (3=harmonic 6=class2 [7, 10]=fourier)')
dihedral_style = DihedralType.guess_style(
len(line.split('#')[0].split()[1:])
)
if dihedral_style:
dt = DihedralType.parse_lammps(line, dihedral_style)
s.dihedral_types.add(dt)
verbose_print('read "%s" dihedral parameters '
'for %s DihedralTypes'
% (dihedral_style, s.dihedral_types.count))
elif len(line) > 0 and line[0] == 'MiddleBondTorsion':
next(f)
for i in range(ndihedral_types):
line = next(f).strip().split()
tag = int(line[0])
s.dihedral_types[tag].a1 = float(line[1])
s.dihedral_types[tag].a2 = float(line[2])
s.dihedral_types[tag].a3 = float(line[3])
s.dihedral_types[tag].r2 = float(line[4])
if dihedral_style:
verbose_print('read "%s" dihedral '
'(middle-bond-torsion parameters for '
'%s DihedralTypes'
% (dihedral_style, ndihedral_types))
elif len(line) > 0 and line[0] == 'EndBondTorsion':
next(f)
for i in range(ndihedral_types):
line = next(f).strip().split()
tag = int(line[0])
s.dihedral_types[tag].b1 = float(line[1])
s.dihedral_types[tag].b2 = float(line[2])
s.dihedral_types[tag].b3 = float(line[3])
s.dihedral_types[tag].c1 = float(line[4])
s.dihedral_types[tag].c2 = float(line[5])
s.dihedral_types[tag].c3 = float(line[6])
s.dihedral_types[tag].r1 = float(line[7])
s.dihedral_types[tag].r3 = float(line[8])
if dihedral_style:
verbose_print('read "%s" dihedral '
'(end-bond-torsion parameters for '
'%s DihedralTypes'
% (dihedral_style, ndihedral_types))
elif len(line) > 0 and line[0] == 'AngleTorsion':
next(f)
for i in range(ndihedral_types):
line = next(f).strip().split()
tag = int(line[0])
s.dihedral_types[tag].d1 = float(line[1])
s.dihedral_types[tag].d2 = float(line[2])
s.dihedral_types[tag].d3 = float(line[3])
s.dihedral_types[tag].e1 = float(line[4])
s.dihedral_types[tag].e2 = float(line[5])
s.dihedral_types[tag].e3 = float(line[6])
s.dihedral_types[tag].theta1 = float(line[7])
s.dihedral_types[tag].theta2 = float(line[8])
if dihedral_style:
verbose_print('read "%s" dihedral '
'(angle-torsion parameters for '
'%s DihedralTypes'
% (dihedral_style, ndihedral_types))
elif len(line) > 0 and line[0] == 'AngleAngleTorsion':
next(f)
for i in range(ndihedral_types):
line = next(f).strip().split()
tag = int(line[0])
s.dihedral_types[tag].m = float(line[1])
s.dihedral_types[tag].theta1 = float(line[2])
s.dihedral_types[tag].theta2 = float(line[3])
if dihedral_style:
verbose_print('read "%s" dihedral '
'(angle-angle-torsion parameters for '
'%s DihedralTypes'
% (dihedral_style, ndihedral_types))
elif len(line) > 0 and line[0] == 'BondBond13':
next(f)
for i in range(ndihedral_types):
line = next(f).strip().split()
tag = int(line[0])
s.dihedral_types[tag].n = float(line[1])
s.dihedral_types[tag].r1 = float(line[2])
s.dihedral_types[tag].r3 = float(line[3])
if dihedral_style:
verbose_print('read "%s" dihedral '
'(bond-bond-1-3 parameters for '
'%s DihedralTypes'
% (dihedral_style, ndihedral_types))
elif len(line) > 0 and line[0] == 'Improper':
next(f)
for i in range(nimproper_types):
line = next(f)
if not improper_style:
warning_print('unknown improper_style - inferring from number of parameters (3=cvff)')
improper_style = ImproperType.guess_style(
len(line.split('#')[0].split()[1:])
)
if improper_style.startswith('harmonic') and 'class2' in [bond_style, angle_style, dihedral_style]:
improper_style = 'class2'
if improper_style:
s.improper_types.add(ImproperType.parse_lammps(line, improper_style))
verbose_print('read "%s" improper parameters '
'for %s ImproperTypes'
% (improper_style, s.improper_types.count))
elif len(line) > 0 and line[0] == 'AngleAngle':
improper_style = 'class2'
next(f)
for i in range(nimproper_types):
line = next(f).strip().split()
tag = int(line[0])
s.improper_types[tag].m1 = float(line[1])
s.improper_types[tag].m2 = float(line[2])
s.improper_types[tag].m3 = float(line[3])
s.improper_types[tag].theta1 = float(line[4])
s.improper_types[tag].theta2 = float(line[5])
s.improper_types[tag].theta3 = float(line[6])
if improper_style:
verbose_print('read "%s" improper '
'(angle-angle parameters for '
'%s ImproperTypes'
% (improper_style, nimproper_types))
elif len(line) > 0 and line[0] == 'Atoms':
next(f)
for i in range(nparticles):
line = next(f).strip().split()
tag = int(line[0])
if not atom_style:
if len(line) == 7:
atom_style = 'full'
elif len(line) == 6:
try:
int(line[2])
atom_style = 'molecular'
except:
atom_style = 'charge'
else:
warning_print('cannot determine atom_style; assuming atom_style "full"')
atom_style = 'full'
if atom_style == 'full':
d_ = {'tag': tag, 'molecule': int(line[1]), 'type': int(line[2]),
'charge': float(line[3]), 'x': float(line[4]),
'y': float(line[5]), 'z': float(line[6])}
elif atom_style == 'charge':
d_ = {'tag': tag, 'molecule': 0, 'type': int(line[1]),
'charge': float(line[2]), 'x': float(line[3]),
'y': float(line[4]), 'z': float(line[5])}
elif atom_style == 'molecular':
d_ = {'tag': tag, 'molecule': int(line[1]), 'type': int(line[2]),
'charge': 0., 'x': float(line[3]), 'y': float(line[4]), 'z': float(line[5])}
if s.particles[tag]:
p = s.particles[tag]
p.set(**d_)
else:
p = Particle(vx=0., vy=0., vz=0., **d_)
s.particles.add(p)
p.frac_x = p.x / s.dim.dx
p.frac_y = p.y / s.dim.dy
p.frac_z = p.z / s.dim.dz
if not quiet:
verbose_print('read %s particles' % nparticles)
elif len(line) > 0 and line[0] == 'Velocities':
next(f)
for i in range(nparticles):
line = next(f).strip().split()
tag = int(line[0])
if s.particles[tag]:
p = s.particles[tag]
d_ = {'vx': float(line[1]), 'vy': float(line[2]),
'vz': float(line[3])}
p.set(**d_)
else:
p = Particle(tag=tag, vx=float(line[1]), vy=float(line[2]),
vz=float(line[3]))
s.particles.add(p)
if not quiet:
verbose_print('read velocities for %s particles' % nparticles)
elif len(line) > 0 and line[0] == 'Bonds':
next(f)
for i in range(nbonds):
line = next(f).strip().split()
tag = int(line[0])
b = Bond(tag=tag, type=int(line[1]),
a=int(line[2]), b=int(line[3]))
s.bonds.add(b)
if not quiet:
verbose_print('read %s bonds' % nbonds)
elif len(line) > 0 and line[0] == 'Angles':
next(f)
for i in range(nangles):
line = next(f).strip().split()
tag = int(line[0])
a = Angle(tag=tag, type=int(line[1]),
a=int(line[2]), b=int(line[3]), c=int(line[4]))
s.angles.add(a)
if not quiet:
verbose_print('read %s angles' % nangles)
elif len(line) > 0 and line[0] == 'Dihedrals':
next(f)
for i in range(ndihedrals):
line = next(f).strip().split()
tag = int(line[0])
d = Dihedral(tag=tag, type=int(line[1]),
a=int(line[2]), b=int(line[3]),
c=int(line[4]), d=int(line[5]))
s.dihedrals.add(d)
if not quiet:
verbose_print('read %s dihedrals' % ndihedrals)
elif len(line) > 0 and line[0] == 'Impropers':
next(f)
for i in range(nimpropers):
line = next(f).strip().split()
tag = int(line[0])
if (s.ff_class == '2' or improper_style == 'class2' or (s.improper_types[1] and s.improper_types[1].m1
is not None)):
s.impropers.add(Improper(tag=tag, type=int(line[1]),
a=int(line[3]), b=int(line[2]),
c=int(line[4]), d=int(line[5])))
else:
s.impropers.add(Improper(tag=tag, type=int(line[1]),
a=int(line[2]), b=int(line[3]),
c=int(line[4]), d=int(line[5])))
if not quiet:
verbose_print('read %s impropers' % nimpropers)
f.close()
s.atom_style = atom_style
s.pair_style = pair_style
s.bond_style = bond_style
s.angle_style = angle_style
s.dihedral_style = dihedral_style
if improper_style:
s.improper_style = improper_style
elif not improper_style and s.impropers.count > 0:
if not quiet:
verbose_print('improper style not set explicitly '
'but impropers exist in system, guessing style '
'based on other forcefield styles...')
if (s.bond_style.startswith('harm') or
s.angle_style.startswith('harm') or
s.dihedral_style.startswith('harm')):
improper_style = 'harmonic'
s.improper_style = 'harmonic'
elif (s.bond_style.startswith('class2') or
s.angle_style.startswith('class2') or
s.dihedral_style.startswith('class2')):
improper_style = 'class2'
s.improper_style = 'class2'
if s.improper_style:
if not quiet:
verbose_print('setting improper style to "%s", '
'if this is incorrect try explicitly setting '
'improper_style as argument in '
'pysimm.system.read_lammps' % improper_style)
else:
if not quiet:
error_print('still cannot determine improper style...')
if pair_style and pair_style.startswith('lj'):
if ((s.bond_style and s.bond_style.startswith('class2')) or
(s.angle_style and s.angle_style.startswith('class2')) or
(s.dihedral_style and s.dihedral_style.startswith('class2'))):
s.pair_style = 'class2'
styles = [s.pair_style, s.bond_style, s.angle_style, s.dihedral_style,
s.improper_style]
if 'class2' in styles:
s.ff_class = '2'
else:
s.ff_class = '1'
if 'harmonic' in styles and 'class2' in styles:
if not quiet:
warning_print('it appears there is a mixture of class1 and class2 '
'forcefield styles in your system...this is usually '
'unadvised')
if set_types:
s.objectify()
for pt in s.particle_types:
if pt.name and pt.name.find('@') >= 0:
if pt.name.split('@')[-1][0].upper() in ['H', 'C', 'N', 'O', 'F', 'S']:
pt.elem = pt.name.split('@')[-1][0].upper()
if pt.name and pt.name[0] == 'L' and pt.name[1] != 'i':
pt.elem = pt.name[1].upper()
elif pt.name:
pt.elem = pt.name[0:2]
if pt.name[1:3] == 'Na':
pt.elem = 'Na'
if pt.name[0].upper() in ['H', 'C', 'N', 'O', 'F', 'S']:
pt.elem = pt.name[0].upper()
for p in s.particles:
if isinstance(p.type, ParticleType) and p.type.name and p.type.name.find('@') >= 0:
if p.type.name[0].upper() == 'H':
p.linker = 'head'
elif p.type.name[0].upper() == 'T':
p.linker = 'tail'
elif p.type.name[0].upper() == 'L':
p.linker = True
if s.objectified:
s.set_cog()
s.set_mass()
s.set_volume()
s.set_density()
s.set_velocity()
return s
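# Usage sketch (editor's illustration; the keyword values shown are
# assumptions for the example): reading a LAMMPS data file while overriding
# the automatic style guesses:
#     s = read_lammps('data.lmps', atom_style='full', pair_style='lj')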
def read_pubchem_smiles(smiles, quiet=False, type_with=None):
"""pysimm.system.read_pubchem_smiles
Interface with pubchem restful API to create molecular system from SMILES format
Args:
smiles: smiles formatted string of molecule
type_with: :class:`~pysimm.forcefield.Forcefield` object to type with default=None
Returns:
:class:`~pysimm.system.System` object
"""
req = ('https://pubchem.ncbi.nlm.nih.gov/'
'rest/pug/compound/smiles/%s/SDF/?record_type=3d' % smiles)
if not quiet:
print('making request to pubchem RESTful API:')
print(req)
try:
resp = urlopen(req)
return read_mol(resp.read().decode('utf-8'), type_with=type_with)
except (HTTPError, URLError):
print('Could not retrieve pubchem entry for smiles %s' % smiles)
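# Usage sketch (editor's illustration; assumes network access and that the
# Gaff2 forcefield class is available): building and typing a small molecule
# from its SMILES string in one call:
#     from pysimm import forcefield
#     ethanol = read_pubchem_smiles('CCO', type_with=forcefield.Gaff2())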
def read_pubchem_cid(cid, type_with=None):
"""pysimm.system.read_pubchem_smiles
Interface with pubchem restful API to create molecular system from SMILES format
Args:
smiles: smiles formatted string of molecule
type_with: :class:`~pysimm.forcefield.Forcefield` object to type with default=None
Returns:
:class:`~pysimm.system.System` object
"""
req = ('https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/{}/SDF/?record_type=3d'.format(cid))
print('making request to pubchem RESTful API:')
print(req)
try:
resp = urlopen(req)
return read_mol(resp.read().decode('utf-8'), type_with=type_with)
except (HTTPError, URLError):
print('Could not retrieve pubchem entry for cid %s' % cid)
def read_cml(cml_file, **kwargs):
"""pysimm.system.read_cml
Interprets cml file and creates :class:`~pysimm.system.System` object
Args:
cml_file: cml file name
linkers (optional): if True, use spinMultiplicity to determine linker default=None
Returns:
:class:`~pysimm.system.System` object
"""
linkers = kwargs.get('linkers')
if os.path.isfile(cml_file):
debug_print('reading file')
iter_parse = Et.iterparse(cml_file)
elif isinstance(cml_file, str):
debug_print('reading string')
iter_parse = Et.iterparse(StringIO(cml_file))
else:
raise PysimmError('pysimm.system.read_cml requires a file as argument')
for _, el in iter_parse:
if '}' in el.tag:
el.tag = el.tag.split('}', 1)[1]
root = iter_parse.root
s = System(name='read using pysimm.system.read_cml')
particles = root.find('atomArray')
bonds = root.find('bondArray')
for p_ in particles:
tag = int(p_.attrib['id'].replace('a', '').replace(',', ''))
elem = p_.attrib['elementType']
x = float(p_.attrib['x3'])
y = float(p_.attrib['y3'])
z = float(p_.attrib['z3'])
if linkers:
linker = True if p_.attrib.get('spinMultiplicity') else None
else:
linker = None
p = Particle(tag=tag, elem=elem, x=x, y=y, z=z, charge=0, molecule=1, linker=linker)
s.particles.add(p)
for b_ in bonds:
a, b = b_.attrib['atomRefs2'].split()
a = int(a.replace('a', '').replace(',', ''))
b = int(b.replace('a', '').replace(',', ''))
order = b_.attrib['order']
if order == 'A':
order = 4
else:
order = int(order)
b = Bond(a=a, b=b, order=order)
s.bonds.add(b)
s.objectify()
return s
def read_mol(mol_file, type_with=None, version='V2000'):
"""pysimm.system.read_mol
Interprets mol file and creates :class:`~pysimm.system.System` object
Args:
mol_file: mol file name
type_with (optional): :class:`~pysimm.forcefield.Forcefield` object used to type the system after reading default=None
version: version of mol file to expect default='V2000'
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(mol_file):
debug_print('reading file')
f = open(mol_file)
elif isinstance(mol_file, str):
debug_print('reading string')
f = StringIO(mol_file)
else:
raise PysimmError('pysimm.system.read_mol requires either '
'file or string as argument')
s = System(name='read using pysimm.system.read_mol')
for n in range(3):
next(f)
line = next(f)
nparticles = int(line.split()[0])
nbonds = int(line.split()[1])
if len(line.split()) >= 3:
version = line.split()[-1]
if version == 'V2000':
for n in range(nparticles):
line = next(f)
x, y, z, elem, something, charge = line.split()[:6]
p = Particle(x=float(x), y=float(y), z=float(z), molecule=1,
elem=elem, charge=float(charge))
s.particles.add(p)
if p.elem[0] == 'L':
p.linker = True
p.elem = p.elem[1:]
elif p.charge == 5:
p.linker = True
p.charge = 0
for n in range(nbonds):
line = next(f)
a, b, order = map(int, line.split()[:3])
new_bond = s.bonds.add(Bond(a=a, b=b, order=order))
elif version == 'V3000':
next(f)
line = next(f)
nparticles = int(line.split()[3])
nbonds = int(line.split()[4])
next(f)
for n in range(nparticles):
line = next(f)
id_, elem, x, y, z, charge = line.split()[2:8]
p = Particle(x=float(x), y=float(y), z=float(z), molecule=1,
elem=elem, charge=float(charge))
s.particles.add(p)
next(f)
next(f)
for n in range(nbonds):
line = next(f)
id_, order, a, b = map(int, line.split()[2:6])
s.bonds.add(Bond(a=a, b=b, order=order))
s.objectify()
if type_with:
try:
s.apply_forcefield(type_with)
except Exception:
print('forcefield typing with forcefield %s unsuccessful'
% type_with.name)
return s
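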
def read_prepc(prec_file):
"""pysimm.system.read_prepc
Interprets prepc file and creates :class:`~pysimm.system.System` object
Args:
prec_file: prepc (AMBER prep) file name
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(prec_file):
debug_print('reading file')
f = open(prec_file)
elif isinstance(prec_file, str):
debug_print('reading string')
f = StringIO(prec_file)
else:
raise PysimmError('pysimm.system.read_prepc requires either '
'file or string as argument')
s = System(name='read using pysimm.system.read_prepc')
for line in f:
for _ in range(10):
line = next(f)
while line.split():
tag = int(line.split()[0])
name = line.split()[1]
type_name = line.split()[2]
x = float(line.split()[4])
y = float(line.split()[5])
z = float(line.split()[6])
charge = float(line.split()[7])
elem = type_name[0]
p = Particle(tag=tag, name=name, type_name=type_name, x=x, y=y, z=z, elem=elem, charge=charge)
if not s.particles[tag]:
s.particles.add(p)
line = next(f)
break
f.close()
return s
def read_ac(ac_file):
"""pysimm.system.read_ac
Interprets ac file and creates :class:`~pysimm.system.System` object
Args:
ac_file: ac file name
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(ac_file):
debug_print('reading file')
f = open(ac_file)
elif isinstance(ac_file, str):
debug_print('reading string')
f = StringIO(ac_file)
else:
raise PysimmError('pysimm.system.read_ac requires either '
'file or string as argument')
s = System(name='read using pysimm.system.read_ac')
for line in f:
if line.startswith('ATOM'):
tag = int(line.split()[1])
name = line.split()[2]
resname = line.split()[3]
resid = line.split()[4]
x = float(line.split()[5])
y = float(line.split()[6])
z = float(line.split()[7])
charge = float(line.split()[8])
type_name = line.split()[9]
elem = type_name[0]
p = Particle(tag=tag, name=name, type_name=type_name, resname=resname, resid=resid, x=x, y=y, z=z, elem=elem, charge=charge)
if not s.particles[tag]:
s.particles.add(p)
if line.startswith('BOND'):
tag = int(line.split()[1])
a = s.particles[int(line.split()[2])]
b = s.particles[int(line.split()[3])]
b = Bond(tag=tag, a=a, b=b)
if not s.bonds[tag]:
s.bonds.add(b)
f.close()
return s
def read_pdb(pdb_file):
"""pysimm.system.read_pdb
Interprets pdb file and creates :class:`~pysimm.system.System` object
Args:
pdb_file: pdb file name
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(pdb_file):
debug_print('reading file')
f = open(pdb_file)
elif isinstance(pdb_file, str):
debug_print('reading string')
f = StringIO(pdb_file)
else:
raise PysimmError('pysimm.system.read_pdb requires either '
'file or string as argument')
s = System(name='read using pysimm.system.read_pdb')
for line in f:
if line.startswith('ATOM'):
tag = int(line[6:11].strip())
name = line[12:16].strip()
resname = line[17:20].strip()
chainid = line[21]
resid = line[22:26].strip()
x = float(line[30:38].strip())
y = float(line[38:46].strip())
z = float(line[46:54].strip())
elem = line[76:78].strip()
p = Particle(tag=tag, name=name, resname=resname, chainid=chainid, resid=resid, x=x, y=y, z=z, elem=elem)
if not s.particles[tag]:
s.particles.add(p)
f.close()
return s
def compare(s1, s2):
print('Particle Types:\n')
for pt in s1.particle_types:
s2_pt = s2.particle_types.get(pt.name)
if s2_pt and len(s2_pt) == 1:
s2_pt = s2_pt[0]
print('%s\n%s\n' % (vars(pt), vars(s2_pt)))
print('\n\nBond Types:\n')
for bt in s1.bond_types:
s2_bt = s2.bond_types.get(bt.name)
if s2_bt and len(s2_bt) == 1:
s2_bt = s2_bt[0]
print('%s\n%s\n' % (vars(bt), vars(s2_bt)))
print('\n\nAngle Types:\n')
for at in s1.angle_types:
s2_at = s2.angle_types.get(at.name)
if s2_at and len(s2_at) == 1:
s2_at = s2_at[0]
print('%s\n%s\n' % (vars(at), vars(s2_at)))
print('\n\nDihedral Types:\n')
for dt in s1.dihedral_types:
s2_dt = s2.dihedral_types.get(dt.name)
if s2_dt and len(s2_dt) == 1:
s2_dt = s2_dt[0]
print('%s\n%s\n' % (vars(dt), vars(s2_dt)))
print('\n\nImproper Types:\n')
for it in s1.improper_types:
s2_it = s2.improper_types.get(it.name)
if s2_it and len(s2_it) == 1:
s2_it = s2_it[0]
print('%s\n%s\n' % (vars(it), vars(s2_it)))
def get_types(*arg, **kwargs):
"""pysimm.system.get_types
Get unique type names from list of systems
Args:
write (optional): if True, write types dictionary to filename
Returns:
(ptypes, btypes, atypes, dtypes, itypes)
*** for use with update_types ***
"""
write = kwargs.get('write')
ptypes = ItemContainer()
btypes = ItemContainer()
atypes = ItemContainer()
dtypes = ItemContainer()
itypes = ItemContainer()
for s in arg:
for t in s.particle_types:
if t.name and t.name not in [x.name for x in ptypes]:
ptypes.add(t.copy())
for t in s.bond_types:
if t.name and t.name not in [x.name for x in btypes]:
btypes.add(t.copy())
for t in s.angle_types:
if t.name and t.name not in [x.name for x in atypes]:
atypes.add(t.copy())
for t in s.dihedral_types:
if t.name and t.name not in [x.name for x in dtypes]:
dtypes.add(t.copy())
for t in s.improper_types:
if t.name and t.name not in [x.name for x in itypes]:
itypes.add(t.copy())
if write:
t_file = open('types.txt', 'w+')
if ptypes.count > 0:
t_file.write('atom types\n')
for t in ptypes:
t_file.write('%s %s\n' % (t.tag, t.name))
if btypes.count > 0:
t_file.write('\nbond types\n')
for t in btypes:
t_file.write('%s %s\n' % (t.tag, t.name))
if atypes.count > 0:
t_file.write('\nangle types\n')
for t in atypes:
t_file.write('%s %s\n' % (t.tag, t.name))
if dtypes.count > 0:
t_file.write('\ndihedral types\n')
for t in dtypes:
t_file.write('%s %s\n' % (t.tag, t.name))
if itypes.count > 0:
t_file.write('\nimproper types\n')
for t in itypes:
t_file.write('%s %s\n' % (t.tag, t.name))
t_file.close()
return ptypes, btypes, atypes, dtypes, itypes
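# Usage sketch for get_types (illustrative only; s1 and s2 stand for previously built
# System objects, e.g. returned by read_pdb above):
#   ptypes, btypes, atypes, dtypes, itypes = get_types(s1, s2, write=True)
#   print(ptypes.count)  # unique particle type names collected across both systems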
def distance_to_origin(p):
"""pysimm.system.distance_to_origin
Calculates distance of particle to origin.
Args:
p: Particle object with x, y, and z attributes
Returns:
Distance of particle to origin
"""
return sqrt(pow(p.x, 2) + pow(p.y, 2) + pow(p.z, 2))
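# Quick check (illustrative): a particle at (3, 4, 0) lies 5.0 length units from the origin.
#   p = Particle(x=3.0, y=4.0, z=0.0)
#   distance_to_origin(p)  # -> 5.0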
def replicate(ref, nrep, s_=None, density=0.3, rand=True, print_insertions=True):
"""pysimm.system.replicate
Replicates list of :class:`~pysimm.system.System` objects into new (or existing) :class:`~pysimm.system.System`.
Insertions can be performed at random positions.
Args:
ref: reference :class:`~pysimm.system.System`(s) (this can be a list)
nrep: number of insertions to perform (can be list but must match length of ref)
s_: :class:`~pysimm.system.System` into which insertions will be performed default=None
density: density of new :class:`~pysimm.system.System` default=0.3 (set to None to not change box)
rand: if True, random insertion is performed
print_insertions: if True, update screen with number of insertions
"""
if not isinstance(ref, list):
ref = [ref]
if not isinstance(nrep, list):
nrep = [nrep]
assert len(ref) == len(nrep)
if s_ is None:
s_ = System()
s_.ff_class = ref[0].ff_class
s_.forcefield = ref[0].forcefield
s_.pair_style = ref[0].pair_style
s_.bond_style = ref[0].bond_style
s_.angle_style = ref[0].angle_style
s_.dihedral_style = ref[0].dihedral_style
s_.improper_style = ref[0].improper_style
for r in ref:
r.set_mass()
r.center('particles', [0, 0, 0], True)
r.r = 0
for p in r.particles:
r.r = max(r.r, distance_to_origin(p))
s_.molecule_types.add(r)
mass = 0
for i, r in enumerate(ref):
mass += r.mass * nrep[i]
mass /= 6.02e23
if density:
volume = float(mass) / density
boxl = pow(volume, 1 / 3.) * 1e8
s_.dim.xlo = -1. * boxl / 2.
s_.dim.xhi = boxl / 2.
s_.dim.ylo = -1. * boxl / 2.
s_.dim.yhi = boxl / 2.
s_.dim.zlo = -1. * boxl / 2.
s_.dim.zhi = boxl / 2.
num = 0
for j, r in enumerate(ref):
for n in range(nrep[j]):
if rand:
rotate_x = random() * 2 * pi
rotate_y = random() * 2 * pi
rotate_z = random() * 2 * pi
dx = s_.dim.xhi - s_.dim.xlo
dx = (-dx / 2. + r.r) + random() * (dx - 2 * r.r)
dy = s_.dim.yhi - s_.dim.ylo
dy = (-dy / 2. + r.r) + random() * (dy - 2 * r.r)
dz = s_.dim.zhi - s_.dim.zlo
dz = (-dz / 2. + r.r) + random() * (dz - 2 * r.r)
r_ = r.copy(rotate_x=rotate_x, rotate_y=rotate_y,
rotate_z=rotate_z, dx=dx, dy=dy, dz=dz)
else:
r_ = r.copy()
s_.add(r_, change_dim=False, update_properties=False)
num += 1
if print_insertions:
verbose_print('Molecule %s inserted' % num)
s_.set_density()
s_.set_cog()
s_.set_velocity()
return s_
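# Usage sketch for replicate (illustrative only; 'mono' stands for any prepared System,
# e.g. a single molecule read in and typed beforehand):
#   packed = replicate(mono, 10, density=0.3, rand=True)
#   print(packed.particles.count)  # particle count after the 10 random insertions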
| mit |
LorenzoBi/computational_physics | class_exercises/Lorenzo_Biasi_es8.py | 1 | 2687 | import numpy as np
from random import random, uniform
import matplotlib.pyplot as plt
def simulate_decay(n_atoms, probability):
iteration = 0
nuclei = np.ones(n_atoms)
n_active = n_atoms
while n_active > 0:
for i in range(n_atoms):
if random() < probability and nuclei[i] != 0:
nuclei[i] = 0
n_active -= 1
iteration += 1
return iteration
def simultaneous_walks(step, n_drunks):
positions = np.zeros((n_drunks, 2))
N_ITER_MAX = 100
mean_x = np.zeros(N_ITER_MAX)
mean_y = np.zeros(N_ITER_MAX)
mean_x2 = np.zeros(N_ITER_MAX)
mean_y2 = np.zeros(N_ITER_MAX)
for iteration in range(N_ITER_MAX):
for i in range(n_drunks):
angle = uniform(0, 2 * np.pi)
positions[i, :] += np.array([np.cos(angle), np.sin(angle)]) * step
mean_x[iteration] += positions[i, 0]
mean_y[iteration] += positions[i, 1]
mean_x2[iteration] += positions[i, 0] ** 2
mean_y2[iteration] += positions[i, 1] ** 2
mean_x[iteration] = mean_x[iteration] / float(n_drunks)
mean_y[iteration] = mean_y[iteration] / float(n_drunks)
mean_x2[iteration] = mean_x2[iteration] / float(n_drunks)
mean_y2[iteration] = mean_y2[iteration] / float(n_drunks)
return mean_x, mean_y, mean_x2, mean_y2
def montecarlo_quart_circle(n_shoot):
n_hits = 0
for i in range(n_shoot):
if random() < np.sqrt(1 - random() ** 2):
n_hits += 1
return n_hits
def problem1():
n_atoms = 1000
probability = .2
N_TIMES = 20
average = 0
for i in range(N_TIMES):
average += simulate_decay(n_atoms, probability)
print 'Average life time for'
print 'p =', probability, ' and number of atoms', n_atoms
print average / float(N_TIMES)
def problem2():
mean_x, mean_y, mean_x2, mean_y2 = simultaneous_walks(1, 1000)
iterations = np.arange(1, len(mean_x) + 1)
plt.figure('random_walk')
plt.rc('text', usetex=True)
plt.plot(iterations, mean_x, '.', label=r'$<x>$')
plt.plot(iterations, mean_x2, '.', label=r'$<x^2>$')
plt.plot(iterations, mean_y, '.', label=r'$<y>$')
plt.plot(iterations, mean_y2, '.', label=r'$<y^2>$')
plt.plot(iterations, iterations / 2., label=r'$\frac{t}{2}$')
plt.legend()
plt.grid()
def problem3():
n_total = 100000
print 'Montecarlo value:'
print montecarlo_quart_circle(n_total) / float(n_total)
print 'Actual value:'
print np.pi / 4.
if __name__ == '__main__':
print 'Problem 1 \n'
problem1()
problem2()
print '\n\nProblem 2 \n'
problem3()
plt.show()
| mit |
ak110/pytoolkit | pytoolkit/pipeline/xgb.py | 1 | 4775 | """xgboost"""
from __future__ import annotations
import logging
import pathlib
import typing
import numpy as np
import pandas as pd
import sklearn.metrics
import pytoolkit as tk
from .core import Model
logger = logging.getLogger(__name__)
class XGBModel(Model):
"""XGBoostのモデル。
Args:
params: XGBoostのパラメータ
nfold: cvの分割数
models_dir: 保存先ディレクトリ
early_stopping_rounds: xgboost.cvのパラメータ
num_boost_round: xgboost.cvのパラメータ
verbose_eval: xgboost.cvのパラメータ
callbacks: xgboost.cvのパラメータ
cv_params: xgboost.cvのパラメータ (kwargs)
"""
def __init__(
self,
params: typing.Dict[str, typing.Any],
nfold: int,
models_dir: tk.typing.PathLike,
early_stopping_rounds: int = 200,
num_boost_round: int = 9999,
verbose_eval: int = 100,
callbacks: typing.List[typing.Callable[[typing.Any], None]] = None,
cv_params: typing.Dict[str, typing.Any] = None,
preprocessors: tk.pipeline.EstimatorListType = None,
postprocessors: tk.pipeline.EstimatorListType = None,
):
import xgboost
super().__init__(nfold, models_dir, preprocessors, postprocessors)
self.params = params
self.early_stopping_rounds = early_stopping_rounds
self.num_boost_round = num_boost_round
self.verbose_eval = verbose_eval
self.callbacks = callbacks
self.cv_params = cv_params
self.gbms_: typing.Optional[typing.List[xgboost.Booster]] = None
self.best_ntree_limit_: typing.Optional[int] = None
def _save(self, models_dir: pathlib.Path):
assert self.gbms_ is not None
tk.utils.dump(self.gbms_, models_dir / "model.pkl")
tk.utils.dump(self.best_ntree_limit_, models_dir / "best_ntree_limit.pkl")
# Also save the feature importance alongside the model.
df_importance = self.feature_importance()
df_importance.to_excel(str(models_dir / "feature_importance.xlsx"))
def _load(self, models_dir: pathlib.Path):
self.gbms_ = tk.utils.load(models_dir / "model.pkl")
self.best_ntree_limit_ = tk.utils.load(models_dir / "best_ntree_limit.pkl")
assert self.gbms_ is not None
assert len(self.gbms_) == self.nfold
def _cv(self, dataset: tk.data.Dataset, folds: tk.validation.FoldsType) -> None:
import xgboost
assert isinstance(dataset.data, pd.DataFrame)
train_set = xgboost.DMatrix(
data=dataset.data,
label=dataset.labels,
weight=dataset.weights,
feature_names=dataset.data.columns.values,
)
self.gbms_ = []
def model_extractor(env):
assert self.gbms_ is not None
self.gbms_.clear()
self.gbms_.extend([f.bst for f in env.cvfolds])
eval_hist = xgboost.cv(
self.params,
dtrain=train_set,
folds=folds,
callbacks=(self.callbacks or []) + [model_extractor],
num_boost_round=self.num_boost_round,
early_stopping_rounds=self.early_stopping_rounds,
verbose_eval=self.verbose_eval,
**(self.cv_params or {}),
)
scores = {}
for k, v in eval_hist.items():
if k.endswith("-mean"):
name, score = k[:-5], v.values[-1]
scores[name] = score
logger.info(f"cv {name}: {score:,.3f}")
self.best_ntree_limit_ = len(v)
def _predict(self, dataset: tk.data.Dataset, fold: int) -> np.ndarray:
import xgboost
assert self.gbms_ is not None
assert self.best_ntree_limit_ is not None
assert isinstance(dataset.data, pd.DataFrame)
data = xgboost.DMatrix(
data=dataset.data, feature_names=dataset.data.columns.values
)
return self.gbms_[fold].predict(data, ntree_limit=self.best_ntree_limit_)
def feature_importance(self, importance_type: str = "total_gain"):
"""Feature ImportanceをDataFrameで返す。"""
assert self.gbms_ is not None
columns = self.gbms_[0].feature_names
for gbm in self.gbms_:
assert tuple(columns) == tuple(gbm.feature_names)
fi = np.zeros((len(columns),), dtype=np.float32)
for gbm in self.gbms_:
d = gbm.get_score(importance_type=importance_type)
fi += [d.get(c, 0) for c in columns]
return pd.DataFrame(data={"importance": fi}, index=columns)
def xgb_r2(preds, dtrain):
"""XGBoost用R2"""
labels = dtrain.get_label()
return "r2", np.float32(sklearn.metrics.r2_score(labels, preds))
| mit |
VipinRathor/zeppelin | python/src/main/resources/python/bootstrap_sql.py | 60 | 1189 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Setup SQL over Pandas DataFrames
# It requires next dependencies to be installed:
# - pandas
# - pandasql
from __future__ import print_function
try:
from pandasql import sqldf
pysqldf = lambda q: sqldf(q, globals())
except ImportError:
pysqldf = lambda q: print("Can not run SQL over Pandas DataFrame" +
"Make sure 'pandas' and 'pandasql' libraries are installed")
| apache-2.0 |
alfayez/gnuradio | gnuradio-core/src/examples/volk_benchmark/volk_plot.py | 78 | 6117 | #!/usr/bin/env python
import sys, math
import argparse
from volk_test_funcs import *
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
sys.exit(1)
def main():
desc='Plot Volk performance results from a SQLite database. ' + \
'Run one of the volk tests first (e.g., volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
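# Example invocation (illustrative; assumes volk_results.db was produced beforehand by
# one of the volk test scripts):
#   ./volk_plot.py -D volk_results.db -P mean -E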
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
# Pull the data out for each table into a dictionary
# we can reference the table by its name and the data associated
# with a given kernel in name_reg by its name.
# This ensures there is no sorting issue with the data in the
# dictionary, so the kernels are plotted against each other.
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = xrange(len(name_reg))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
if args.percent is not None:
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80/(M-1)
x1 = [x + i*wdth for x in x0]
i += 1
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80/M
x1 = [x + i*wdth for x in x0]
i += 1
if(args.errorbars is False):
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
s0.bar(x1, ydata, width=wdth,
yerr=stds,
color=colors[i%M], label=t,
edgecolor='k', linewidth=2,
error_kw={"ecolor": 'k', "capsize":5,
"linewidth":2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
jseabold/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
neuroidss/nupic.research | projects/vehicle-control/agent/run_sm.py | 6 | 7819 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import operator
import time
import numpy
from unity_client.server import Server
from nupic.encoders.coordinate import CoordinateEncoder
from nupic.encoders.scalar import ScalarEncoder
from nupic.algorithms.monitor_mixin.trace import CountsTrace
from sensorimotor.extended_temporal_memory import ApicalTiebreakPairMemory
from htmresearch.support.apical_tm_pair_monitor_mixin import (
ApicalTMPairMonitorMixin)
class MonitoredApicalTiebreakPairMemory(
ApicalTMPairMonitorMixin, ApicalTiebreakPairMemory): pass
SCALE = 5
RADIUS = 10
class Agent(object):
def __init__(self):
self.encoder = CoordinateEncoder(n=1024,
w=21)
self.motorEncoder = ScalarEncoder(21, -1, 1,
n=1024)
self.tm = MonitoredApicalTiebreakPairMemory(
columnDimensions=[2048],
basalInputDimensions=(999999,),  # Dodge input checking.
cellsPerColumn=1,
initialPermanence=0.5,
connectedPermanence=0.6,
permanenceIncrement=0.1,
permanenceDecrement=0.02,
minThreshold=35,
activationThreshold=35,
maxNewSynapseCount=40)
self.plotter = Plotter(self.tm, showOverlaps=False, showOverlapsValues=False)
self.lastState = None
self.lastAction = None
self.prevMotorPattern = ()
def sync(self, outputData):
if not ("location" in outputData and
"steer" in outputData):
print "Warning: Missing data:", outputData
return
reset = outputData.get("reset") or False
if reset:
print "Reset."
self.tm.reset()
location = outputData["location"]
steer = outputData["steer"]
x = int(location["x"] * SCALE)
z = int(location["z"] * SCALE)
coordinate = numpy.array([x, z])
encoding = self.encoder.encode((coordinate, RADIUS))
motorEncoding = self.motorEncoder.encode(steer)
sensorPattern = set(encoding.nonzero()[0])
motorPattern = set(motorEncoding.nonzero()[0])
self.tm.compute(sensorPattern,
activeCellsExternalBasal=motorPattern,
reinforceCandidatesExternalBasal=self.prevMotorPattern,
growthCandidatesExternalBasal=self.prevMotorPattern)
print self.tm.mmPrettyPrintMetrics(self.tm.mmGetDefaultMetrics())
self.plotter.update(encoding, reset)
if reset:
self.plotter.render()
self.lastState = encoding
self.lastAction = steer
self.prevMotorPattern = motorPattern
class Plotter(object):
def __init__(self, tm, showOverlaps=False, showOverlapsValues=False):
self.tm = tm
self.showOverlaps = showOverlaps
self.showOverlapsValues = showOverlapsValues
self.encodings = []
self.resets = []
self.numSegmentsPerCell = []
self.numSynapsesPerSegment = []
import matplotlib.pyplot as plt
self.plt = plt
import matplotlib.cm as cm
self.cm = cm
from pylab import rcParams
if self.showOverlaps and self.showOverlapsValues:
rcParams.update({'figure.figsize': (20, 20)})
else:
rcParams.update({'figure.figsize': (6, 12)})
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
rcParams.update({'ytick.labelsize': 8})
def update(self, encoding, reset):
self.encodings.append(encoding)
self.resets.append(reset)
# TODO: Deal with empty segments / unconnected synapses
numSegmentsPerCell = [len(segments) for segments in
self.tm.connections._segmentsForCell.values()]
self.numSegmentsPerCell.append(numpy.array(numSegmentsPerCell))
numSynapsesPerSegment = [len(synapses) for synapses in
self.tm.connections._synapsesForSegment.values()]
self.numSynapsesPerSegment.append(numpy.array(numSynapsesPerSegment))
def render(self):
timestamp = int(time.time())
self.plt.figure(1)
self.plt.clf()
self._renderMetrics(timestamp)
if self.showOverlaps:
self.plt.figure(2)
self.plt.clf()
self._renderOverlaps(timestamp)
def _renderMetrics(self, timestamp):
traces = self.tm.mmGetDefaultTraces()
traces = [trace for trace in traces if type(trace) is CountsTrace]
t = len(traces)
n = t + 2
for i in xrange(t):
trace = traces[i]
self.plt.subplot(n, 1, i+1)
self._plot(trace.data, trace.title)
self.plt.subplot(n, 1, t+1)
self._plotDistributions(self.numSegmentsPerCell, "# segments per cell")
self.plt.subplot(n, 1, t+2)
self._plotDistributions(self.numSynapsesPerSegment, "# synapses per segment")
self.plt.draw()
self.plt.savefig("sm-{0}_A.png".format(timestamp))
def _renderOverlaps(self, timestamp):
self.plt.subplot(1, 1, 1)
overlaps = self._computeOverlaps()
self._imshow(overlaps, "Overlaps", aspect=None)
for i in self._computeResetIndices():
self.plt.axvline(i, color='black', alpha=0.5)
self.plt.axhline(i, color='black', alpha=0.5)
if self.showOverlapsValues:
for i in range(len(overlaps)):
for j in range(len(overlaps[i])):
overlap = "%.1f" % overlaps[i][j]
self.plt.annotate(overlap, xy=(i, j), fontsize=6, color='red', verticalalignment='center', horizontalalignment='center')
self.plt.draw()
self.plt.savefig("sm-{0}_B.png".format(timestamp))
def _computeOverlaps(self):
overlaps = []
encodings = self.encodings
for i in range(len(encodings)):
row = []
for j in range(len(encodings)):
n = max(encodings[i].sum(), encodings[j].sum())
overlap = (encodings[i] & encodings[j]).sum() / float(n)
row.append(overlap)
overlaps.append(row)
return overlaps
def _computeResetIndices(self):
return numpy.array(self.resets).nonzero()[0]
def _plot(self, data, title):
self.plt.plot(range(len(data)), data)
self._finishPlot(data, title)
def _finishPlot(self, data, title):
self.plt.title(title)
self.plt.xlim(0, len(data))
for i in self._computeResetIndices():
self.plt.axvline(i, color='black', alpha=0.5)
def _imshow(self, data, title, aspect='auto'):
self.plt.title(title)
self.plt.imshow(data,
cmap=self.cm.Greys,
interpolation="nearest",
aspect=aspect,
vmin=0,
vmax=1)
def _plotDistributions(self, data, title):
means = [numpy.mean(x) if len(x) else 0 for x in data]
maxs = [numpy.max(x) if len(x) else 0 for x in data]
self.plt.plot(range(len(data)), means, label='mean')
self.plt.plot(range(len(data)), maxs, label='max')
self.plt.legend(loc='lower right')
self._finishPlot(data, title)
if __name__ == "__main__":
agent = Agent()
Server(agent)
| agpl-3.0 |
aetilley/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
nsoranzo/tools-iuc | tools/fsd/fsd.py | 17 | 44897 | #!/usr/bin/env python
# Family size distribution of SSCSs
#
# Author: Monika Heinzl, Johannes-Kepler University Linz (Austria)
# Contact: [email protected]
#
# Takes as input at least one TABULAR file with tags before the alignment to the SSCS; up to 4 files can be provided.
# The program produces a plot which shows the distribution of family sizes of all SSCSs from the input files and
# a tabular file with the data of the plot, as well as a TXT file with all tags of the DCS and their family sizes.
# If only one file is provided, a family size distribution separated into SSCSs without a partner and DCSs is produced.
# When more than one file (up to 4) is given, a family size distribution with all datasets in one plot is produced.
# USAGE: python FSD_Galaxy_1.4_commandLine_FINAL.py --inputFile1 filename --inputName1 filename --inputFile2 filename2 --inputName2 filename2 --inputFile3 filename3 --inputName3 filename3 --inputFile4 filename4 --inputName4 filename4 --log_axis --output_tabular outptufile_name_tabular --output_pdf outptufile_name_pdf
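# Example invocation (illustrative; file names are placeholders):
#   python fsd.py --inputFile1 tags1.tabular --inputName1 tags1.tabular --output_pdf fsd.pdf --output_tabular fsd.tabular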
import argparse
import sys
import matplotlib.pyplot as plt
import numpy
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file):
with open(file, 'r') as dest_f:
data_array = numpy.genfromtxt(dest_f, skip_header=0, delimiter='\t', comments='#', dtype=str)
return(data_array)
def make_argparser():
parser = argparse.ArgumentParser(description='Family Size Distribution of duplex sequencing data')
parser.add_argument('--inputFile1', help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName1')
parser.add_argument('--inputFile2', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName2')
parser.add_argument('--inputFile3', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName3')
parser.add_argument('--inputFile4', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName4')
parser.add_argument('--log_axis', action="store_false", help='Transform y axis in log scale.')
parser.add_argument('--rel_freq', action="store_false", help='If False, the relative frequencies are displayed.')
parser.add_argument('--output_pdf', default="data.pdf", type=str, help='Name of the pdf file.')
parser.add_argument('--output_tabular', default="data.tabular", type=str, help='Name of the tabular file.')
return parser
def compare_read_families(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
firstFile = args.inputFile1
name1 = args.inputName1
secondFile = args.inputFile2
name2 = args.inputName2
thirdFile = args.inputFile3
name3 = args.inputName3
fourthFile = args.inputFile4
name4 = args.inputName4
log_axis = args.log_axis
rel_freq = args.rel_freq
title_file = args.output_tabular
title_file2 = args.output_pdf
sep = "\t"
plt.rc('figure', figsize=(11.69, 8.27)) # A4 format
plt.rcParams['patch.edgecolor'] = "black"
plt.rcParams['axes.facecolor'] = "E0E0E0" # grey background color
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
list_to_plot = []
label = []
data_array_list = []
list_to_plot_original = []
colors = []
bins = numpy.arange(1, 22)
with open(title_file, "w") as output_file, PdfPages(title_file2) as pdf:
fig = plt.figure()
fig.subplots_adjust(left=0.12, right=0.97, bottom=0.23, top=0.94, hspace=0)
fig2 = plt.figure()
fig2.subplots_adjust(left=0.12, right=0.97, bottom=0.23, top=0.94, hspace=0)
if firstFile is not None:
file1 = readFileReferenceFree(firstFile)
integers = numpy.array(file1[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers)
colors.append("#0000FF")
# for plot: replace all big family sizes by 22
data1 = numpy.clip(integers, bins[0], bins[-1])
name1 = name1.split(".tabular")[0]
if len(name1) > 40:
name1 = name1[:40]
list_to_plot.append(data1)
label.append(name1)
data_array_list.append(file1)
legend = "\n\n\n{}".format(name1)
fig.text(0.05, 0.11, legend, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.11, legend, size=10, transform=plt.gcf().transFigure)
legend1 = "singletons:\nnr. of tags\n{:,} ({:.3f})".format(numpy.bincount(data1)[1],
float(numpy.bincount(data1)[1]) / len(data1))
fig.text(0.32, 0.11, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.11, legend1, size=10, transform=plt.gcf().transFigure)
legend3b = "PE reads\n{:,} ({:.3f})".format(numpy.bincount(data1)[1],
float(numpy.bincount(data1)[1]) / sum(integers))
fig.text(0.45, 0.11, legend3b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.11, legend3b, size=10, transform=plt.gcf().transFigure)
legend4 = "family size > 20:\nnr. of tags\n{:,} ({:.3f})".format(len(integers[integers > 20]),
float(len(integers[integers > 20])) / len(integers))
fig.text(0.58, 0.11, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.11, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "PE reads\n{:,} ({:.3f})".format(sum(integers[integers > 20]),
float(sum(integers[integers > 20])) / sum(integers))
fig.text(0.70, 0.11, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.11, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "total nr. of\ntags\n{:,}".format(len(data1))
fig.text(0.82, 0.11, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.11, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "PE reads\n{:,}".format(sum(integers))
fig.text(0.89, 0.11, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.11, legend6b, size=10, transform=plt.gcf().transFigure)
if secondFile is not None:
file2 = readFileReferenceFree(secondFile)
integers2 = numpy.array(file2[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers2)
colors.append("#298A08")
data2 = numpy.clip(integers2, bins[0], bins[-1])
list_to_plot.append(data2)
name2 = name2.split(".tabular")[0]
if len(name2) > 40:
name2 = name2[:40]
label.append(name2)
data_array_list.append(file2)
fig.text(0.05, 0.09, name2, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.09, name2, size=10, transform=plt.gcf().transFigure)
legend1 = "{:,} ({:.3f})".format(numpy.bincount(data2)[1], float(numpy.bincount(data2)[1]) / len(data2))
fig.text(0.32, 0.09, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.09, legend1, size=10, transform=plt.gcf().transFigure)
legend3 = "{:,} ({:.3f})".format(numpy.bincount(data2)[1], float(numpy.bincount(data2)[1]) / sum(integers2))
fig.text(0.45, 0.09, legend3, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.09, legend3, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,} ({:.3f})".format(len(integers2[integers2 > 20]),
float(len(integers2[integers2 > 20])) / len(integers2))
fig.text(0.58, 0.09, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.09, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "{:,} ({:.3f})".format(sum(integers2[integers2 > 20]),
float(sum(integers2[integers2 > 20])) / sum(integers2))
fig.text(0.70, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "{:,}".format(len(data2))
fig.text(0.82, 0.09, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.09, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "{:,}".format(sum(integers2))
fig.text(0.89, 0.09, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.09, legend6b, size=10, transform=plt.gcf().transFigure)
if thirdFile is not None:
file3 = readFileReferenceFree(thirdFile)
integers3 = numpy.array(file3[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers3)
colors.append("#DF0101")
data3 = numpy.clip(integers3, bins[0], bins[-1])
list_to_plot.append(data3)
name3 = name3.split(".tabular")[0]
if len(name3) > 40:
name3 = name3[:40]
label.append(name3)
data_array_list.append(file3)
fig.text(0.05, 0.07, name3, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.07, name3, size=10, transform=plt.gcf().transFigure)
legend1 = "{:,} ({:.3f})".format(numpy.bincount(data3)[1], float(numpy.bincount(data3)[1]) / len(data3))
fig.text(0.32, 0.07, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.07, legend1, size=10, transform=plt.gcf().transFigure)
legend3b = "{:,} ({:.3f})".format(numpy.bincount(data3)[1],
float(numpy.bincount(data3)[1]) / sum(integers3))
fig.text(0.45, 0.07, legend3b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.07, legend3b, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,} ({:.3f})".format(len(integers3[integers3 > 20]),
float(len(integers3[integers3 > 20])) / len(integers3))
fig.text(0.58, 0.07, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.07, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "{:,} ({:.3f})".format(sum(integers3[integers3 > 20]),
float(sum(integers3[integers3 > 20])) / sum(integers3))
fig.text(0.70, 0.07, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.07, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "{:,}".format(len(data3))
fig.text(0.82, 0.07, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.07, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "{:,}".format(sum(integers3))
fig.text(0.89, 0.07, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.07, legend6b, size=10, transform=plt.gcf().transFigure)
if fourthFile is not None:
file4 = readFileReferenceFree(fourthFile)
integers4 = numpy.array(file4[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers4)
colors.append("#04cec7")
data4 = numpy.clip(integers4, bins[0], bins[-1])
list_to_plot.append(data4)
name4 = name4.split(".tabular")[0]
if len(name4) > 40:
name4 = name4[:40]
label.append(name4)
data_array_list.append(file4)
fig.text(0.05, 0.05, name4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.05, name4, size=10, transform=plt.gcf().transFigure)
legend1 = "{:,} ({:.3f})".format(numpy.bincount(data4)[1], float(numpy.bincount(data4)[1]) / len(data4))
fig.text(0.32, 0.05, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.05, legend1, size=10, transform=plt.gcf().transFigure)
legend3b = "{:,} ({:.3f})".format(numpy.bincount(data4)[1],
float(numpy.bincount(data4)[1]) / sum(integers4))
fig.text(0.45, 0.05, legend3b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.05, legend3b, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,} ({:.3f})".format(len(integers4[integers4 > 20]),
float(len(integers4[integers4 > 20])) / len(integers4))
fig.text(0.58, 0.05, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.05, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "{:,} ({:.3f})".format(sum(integers4[integers4 > 20]),
float(sum(integers4[integers4 > 20])) / sum(integers4))
fig.text(0.70, 0.05, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.05, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "{:,}".format(len(data4))
fig.text(0.82, 0.05, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.05, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "{:,}".format(sum(integers4))
fig.text(0.89, 0.05, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.05, legend6b, size=10, transform=plt.gcf().transFigure)
list_to_plot2 = list_to_plot
if rel_freq:
ylab = "Relative Frequency"
else:
ylab = "Absolute Frequency"
# PLOT FSD based on tags
fig.suptitle('Family Size Distribution (FSD) based on families', fontsize=14)
ax = fig.add_subplot(1, 1, 1)
ticks = numpy.arange(1, 22, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
ax.set_xticks([], [])
if rel_freq:
w = [numpy.zeros_like(data) + 1. / len(data) for data in list_to_plot2]
counts = ax.hist(list_to_plot2, weights=w, bins=numpy.arange(1, 23), stacked=False, edgecolor="black", color=colors, linewidth=1, label=label, align="left", alpha=0.7, rwidth=0.8)
ax.set_ylim(0, 1.07)
else:
counts = ax.hist(list_to_plot2, bins=numpy.arange(1, 23), stacked=False, edgecolor="black", linewidth=1, label=label, align="left", alpha=0.7, rwidth=0.8, color=colors)
ax.set_xticks(numpy.array(ticks))
ax.set_xticklabels(ticks1)
ax.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1))
ax.set_ylabel(ylab, fontsize=14)
ax.set_xlabel("Family size", fontsize=14)
if log_axis:
ax.set_yscale('log')
ax.grid(b=True, which="major", color="#424242", linestyle=":")
ax.margins(0.01, None)
pdf.savefig(fig)
# PLOT FSD based on PE reads
fig2.suptitle('Family Size Distribution (FSD) based on PE reads', fontsize=14)
ax2 = fig2.add_subplot(1, 1, 1)
ticks = numpy.arange(1, 22)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
reads = []
reads_rel = []
barWidth = 0 - (len(list_to_plot) + 1) / 2 * 1. / (len(list_to_plot) + 1)
ax2.set_xticks([], [])
for i in range(len(list_to_plot2)):
x = list(numpy.arange(1, 22).astype(float))
unique, c = numpy.unique(list_to_plot2[i], return_counts=True)
y = unique * c
if sum(list_to_plot_original[i] > 20) > 0:
y[len(y) - 1] = sum(list_to_plot_original[i][list_to_plot_original[i] > 20])
y = [y[x[idx] == unique][0] if x[idx] in unique else 0 for idx in range(len(x))]
reads.append(y)
reads_rel.append(list(numpy.float_(y)) / sum(y))
if len(list_to_plot2) == 1:
x = [xi * 0.5 for xi in x]
w = 0.4
else:
x = [xi + barWidth for xi in x]
w = 1. / (len(list_to_plot) + 1)
if rel_freq:
ax2.bar(x, list(numpy.float_(y)) / numpy.sum(y), align="edge", width=w, edgecolor="black", label=label[i], linewidth=1, alpha=0.7, color=colors[i])
ax2.set_ylim(0, 1.07)
else:
ax2.bar(x, y, align="edge", width=w, edgecolor="black", label=label[i], linewidth=1, alpha=0.7, color=colors[i])
if i == len(list_to_plot2) - 1:
barWidth += 1. / (len(list_to_plot) + 1) + 1. / (len(list_to_plot) + 1)
else:
barWidth += 1. / (len(list_to_plot) + 1)
ax2.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1))
if len(list_to_plot2) == 1:
ax2.set_xticks(numpy.array([xi + 0.2 for xi in x]))
else:
ax2.set_xticks(numpy.array(ticks))
ax2.set_xticklabels(ticks1)
ax2.set_xlabel("Family size", fontsize=14)
ax2.set_ylabel(ylab, fontsize=14)
if log_axis:
ax2.set_yscale('log')
ax2.grid(b=True, which="major", color="#424242", linestyle=":")
ax2.margins(0.01, None)
pdf.savefig(fig2)
plt.close()
# write data to CSV file tags
counts = [numpy.bincount(di, minlength=22)[1:] for di in list_to_plot2] # original counts of family sizes
output_file.write("Values from family size distribution with all datasets based on families\n")
output_file.write("\nFamily size")
for i in label:
output_file.write("{}{}".format(sep, i))
output_file.write("\n")
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
for n in range(len(label)):
output_file.write("{}{}".format(int(counts[n][j]), sep))
output_file.write("\n")
j += 1
output_file.write("sum{}".format(sep))
for i in counts:
output_file.write("{}{}".format(int(sum(i)), sep))
# write data to CSV file PE reads
output_file.write("\n\nValues from family size distribution with all datasets based on PE reads\n")
output_file.write("\nFamily size")
for i in label:
output_file.write("{}{}".format(sep, i))
output_file.write("\n")
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
if len(label) == 1:
output_file.write("{}{}".format(int(reads[0][j]), sep))
else:
for n in range(len(label)):
output_file.write("{}{}".format(int(reads[n][j]), sep))
output_file.write("\n")
j += 1
output_file.write("sum{}".format(sep))
if len(label) == 1:
output_file.write("{}{}".format(int(sum(numpy.concatenate(reads))), sep))
else:
for i in reads:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("\n")
# Family size distribution after DCS and SSCS
for dataset, data_o, name_file in zip(list_to_plot, data_array_list, label):
tags = numpy.array(data_o[:, 2])
seq = numpy.array(data_o[:, 1])
data = numpy.array(dataset)
data_o = numpy.array(data_o[:, 0]).astype(int)
# find all unique tags and get the indices for ALL tags, but only once
u, index_unique, c = numpy.unique(numpy.array(seq), return_counts=True, return_index=True)
d = u[c > 1]
# get family sizes, tag for duplicates
duplTags_double = data[numpy.in1d(seq, d)]
duplTags_double_o = data_o[numpy.in1d(seq, d)]
duplTags = duplTags_double[0::2] # ab of DCS
duplTags_o = duplTags_double_o[0::2] # ab of DCS
duplTagsBA = duplTags_double[1::2] # ba of DCS
duplTagsBA_o = duplTags_double_o[1::2] # ba of DCS
# get family sizes for SSCS with no partner
ab = numpy.where(tags == "ab")[0]
abSeq = seq[ab]
ab_o = data_o[ab]
ab = data[ab]
ba = numpy.where(tags == "ba")[0]
baSeq = seq[ba]
ba_o = data_o[ba]
ba = data[ba]
dataAB = ab[numpy.in1d(abSeq, d, invert=True)]
dataAB_o = ab_o[numpy.in1d(abSeq, d, invert=True)]
dataBA = ba[numpy.in1d(baSeq, d, invert=True)]
dataBA_o = ba_o[numpy.in1d(baSeq, d, invert=True)]
list1 = [duplTags_double, dataAB, dataBA] # list for plotting
list1_o = [duplTags_double_o, dataAB_o, dataBA_o] # list for plotting
# information for family size >= 3
dataAB_FS3 = dataAB[dataAB >= 3]
dataAB_FS3_o = dataAB_o[dataAB_o >= 3]
dataBA_FS3 = dataBA[dataBA >= 3]
dataBA_FS3_o = dataBA_o[dataBA_o >= 3]
duplTags_FS3 = duplTags[(duplTags >= 3) & (duplTagsBA >= 3)] # ab+ba with FS>=3
duplTags_FS3_BA = duplTagsBA[(duplTags >= 3) & (duplTagsBA >= 3)] # ba+ab with FS>=3
duplTags_double_FS3 = len(duplTags_FS3) + len(duplTags_FS3_BA) # both ab and ba strands with FS>=3
# original FS
duplTags_FS3_o = duplTags_o[(duplTags_o >= 3) & (duplTagsBA_o >= 3)] # ab+ba with FS>=3
duplTags_FS3_BA_o = duplTagsBA_o[(duplTags_o >= 3) & (duplTagsBA_o >= 3)] # ba+ab with FS>=3
duplTags_double_FS3_o = sum(duplTags_FS3_o) + sum(duplTags_FS3_BA_o) # both ab and ba strands with FS>=3
fig = plt.figure()
plt.subplots_adjust(left=0.12, right=0.97, bottom=0.3, top=0.94, hspace=0)
if rel_freq:
w = [numpy.zeros_like(dj) + 1. / len(numpy.concatenate(list1)) for dj in list1]
plt.hist(list1, bins=numpy.arange(1, 23), stacked=True, label=["duplex", "ab", "ba"], weights=w, edgecolor="black", linewidth=1, align="left", color=["#FF0000", "#5FB404", "#FFBF00"], rwidth=0.8)
plt.ylim(0, 1.07)
else:
plt.hist(list1, bins=numpy.arange(1, 23), stacked=True, label=["duplex", "ab", "ba"], edgecolor="black", linewidth=1, align="left", color=["#FF0000", "#5FB404", "#FFBF00"], rwidth=0.8)
# tick labels of x axis
ticks = numpy.arange(1, 22, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
plt.xticks(numpy.array(ticks), ticks1)
singl = len(data_o[data_o == 1])
last = len(data_o[data_o > 20]) # large families
if log_axis:
plt.yscale('log')
plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
plt.title("{}: FSD based on families".format(name_file), fontsize=14)
plt.xlabel("Family size", fontsize=14)
plt.ylabel(ylab, fontsize=14)
plt.margins(0.01, None)
plt.grid(b=True, which="major", color="#424242", linestyle=":")
# extra information beneath the plot
legend = "SSCS ab= \nSSCS ba= \nDCS (total)= \ntotal nr. of tags="
plt.text(0.1, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "nr. of tags\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(len(dataAB), len(dataBA),
len(duplTags), len(duplTags_double), (len(dataAB) + len(dataBA) + len(duplTags)),
(len(ab) + len(ba)))
plt.text(0.23, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend5 = "PE reads\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(sum(dataAB_o), sum(dataBA_o),
sum(duplTags_o), sum(duplTags_double_o),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(ab_o) + sum(ba_o)))
plt.text(0.38, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
legend = "rel. freq. of tags\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)),
(len(dataAB) + len(dataBA) + len(duplTags)))
plt.text(0.54, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(float(len(dataAB)) / (len(ab) + len(ba)),
float(len(dataBA)) / (len(ab) + len(ba)),
float(len(duplTags)) / (len(ab) + len(ba)),
float(len(duplTags_double)) / (len(ab) + len(ba)),
(len(ab) + len(ba)))
plt.text(0.64, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend1 = "\nsingletons:\nfamily size > 20:"
plt.text(0.1, 0.03, legend1, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,}\n{:,}".format(singl, last)
plt.text(0.23, 0.03, legend4, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / len(data), float(last) / len(data))
plt.text(0.64, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "\n\n{:,}".format(sum(data_o[data_o > 20]))
plt.text(0.38, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / sum(data_o), float(sum(data_o[data_o > 20])) / sum(data_o))
plt.text(0.84, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend = "PE reads\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)))
plt.text(0.74, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(
float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o)), (sum(ab_o) + sum(ba_o)))
plt.text(0.84, 0.09, legend, size=10, transform=plt.gcf().transFigure)
pdf.savefig(fig)
plt.close()
# PLOT FSD based on PE reads
fig3 = plt.figure()
plt.subplots_adjust(left=0.12, right=0.97, bottom=0.3, top=0.94, hspace=0)
fig3.suptitle("{}: FSD based on PE reads".format(name_file), fontsize=14)
ax2 = fig3.add_subplot(1, 1, 1)
ticks = numpy.arange(1, 22)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
reads = []
reads_rel = []
# barWidth = 0 - (len(list_to_plot) + 1) / 2 * 1. / (len(list_to_plot) + 1)
ax2.set_xticks([], [])
list_y = []
label = ["duplex", "ab", "ba"]
col = ["#FF0000", "#5FB404", "#FFBF00"]
for i in range(len(list1)):
x = list(numpy.arange(1, 22).astype(float))
unique, c = numpy.unique(list1[i], return_counts=True)
y = unique * c
if sum(list1_o[i] > 20) > 0:
y[len(y) - 1] = sum(list1_o[i][list1_o[i] > 20])
y = [y[x[idx] == unique][0] if x[idx] in unique else 0 for idx in range(len(x))]
reads.append(y)
reads_rel.append(list(numpy.float_(y)) / sum(numpy.concatenate(list1_o)))
if rel_freq:
y = list(numpy.float_(y)) / sum(numpy.concatenate(list1_o))
ax2.set_ylim(0, 1.07)
else:
y = y
list_y.append(y)
if i == 0:
ax2.bar(x, y, align="center", width=0.8, edgecolor="black", label=label[0], linewidth=1, alpha=1, color=col[0])
elif i == 1:
ax2.bar(x, y, bottom=list_y[i - 1], align="center", width=0.8, edgecolor="black", label=label[1], linewidth=1, alpha=1, color=col[1])
elif i == 2:
bars = numpy.add(list_y[0], list_y[1]).tolist()
ax2.bar(x, y, bottom=bars, align="center", width=0.8, edgecolor="black", label=label[2], linewidth=1, alpha=1, color=col[2])
ax2.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1))
singl = len(data_o[data_o == 1])
last = len(data_o[data_o > 20]) # large families
ax2.set_xticks(numpy.array(ticks))
ax2.set_xticklabels(ticks1)
ax2.set_xlabel("Family size", fontsize=14)
ax2.set_ylabel(ylab, fontsize=14)
if log_axis:
ax2.set_yscale('log')
ax2.grid(b=True, which="major", color="#424242", linestyle=":")
ax2.margins(0.01, None)
# extra information beneath the plot
legend = "SSCS ab= \nSSCS ba= \nDCS (total)= \ntotal nr. of tags="
plt.text(0.1, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "nr. of tags\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(len(dataAB), len(dataBA),
len(duplTags), len(duplTags_double), (len(dataAB) + len(dataBA) + len(duplTags)),
(len(ab) + len(ba)))
plt.text(0.23, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend5 = "PE reads\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(sum(dataAB_o), sum(dataBA_o),
sum(duplTags_o), sum(duplTags_double_o),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(ab_o) + sum(ba_o)))
plt.text(0.38, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
legend = "rel. freq. of tags\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)),
(len(dataAB) + len(dataBA) + len(duplTags)))
plt.text(0.54, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(float(len(dataAB)) / (len(ab) + len(ba)),
float(len(dataBA)) / (len(ab) + len(ba)),
float(len(duplTags)) / (len(ab) + len(ba)),
float(len(duplTags_double)) / (len(ab) + len(ba)),
(len(ab) + len(ba)))
plt.text(0.64, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend1 = "\nsingletons:\nfamily size > 20:"
plt.text(0.1, 0.03, legend1, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,}\n{:,}".format(singl, last)
plt.text(0.23, 0.03, legend4, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / len(data), float(last) / len(data))
plt.text(0.64, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "\n\n{:,}".format(sum(data_o[data_o > 20]))
plt.text(0.38, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / sum(data_o), float(sum(data_o[data_o > 20])) / sum(data_o))
plt.text(0.84, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend = "PE reads\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)))
plt.text(0.74, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(
float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o)), (sum(ab_o) + sum(ba_o)))
plt.text(0.84, 0.09, legend, size=10, transform=plt.gcf().transFigure)
pdf.savefig(fig3)
plt.close()
# write same information to a csv file
count = numpy.bincount(data_o) # original counts of family sizes
output_file.write("\nDataset:{}{}\n".format(sep, name_file))
output_file.write("max. family size:{}{}\n".format(sep, max(data_o)))
output_file.write("absolute frequency:{}{}\n".format(sep, count[len(count) - 1]))
output_file.write("relative frequency:{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count)))
output_file.write("median family size:{}{}\n".format(sep, numpy.median(numpy.array(data_o))))
output_file.write("mean family size:{}{}\n\n".format(sep, numpy.mean(numpy.array(data_o))))
output_file.write(
"{}singletons:{}{}{}family size > 20:{}{}{}{}length of dataset:\n".format(sep, sep, sep, sep, sep, sep,
sep, sep))
output_file.write(
"{}nr. of tags{}rel. freq of tags{}rel.freq of PE reads{}nr. of tags{}rel. freq of tags{}nr. of PE reads{}rel. freq of PE reads{}total nr. of tags{}total nr. of PE reads\n".format(
sep, sep, sep, sep, sep, sep, sep, sep, sep))
output_file.write("{}{}{}{}{:.3f}{}{:.3f}{}{}{}{:.3f}{}{}{}{:.3f}{}{}{}{}\n\n".format(
name_file, sep, singl, sep, float(singl) / len(data), sep, float(singl) / sum(data_o), sep,
last, sep, float(last) / len(data), sep, sum(data_o[data_o > 20]), sep, float(sum(data_o[data_o > 20])) / sum(data_o), sep, len(data),
sep, sum(data_o)))
# information for FS >= 1
output_file.write(
"The unique frequencies were calculated from the dataset where the tags occured only once (=ab without DCS, ba without DCS)\n"
"Whereas the total frequencies were calculated from the whole dataset (=including the DCS).\n\n")
output_file.write(
"FS >= 1{}nr. of tags{}nr. of PE reads{}rel. freq of tags{}{}rel. freq of PE reads:\n".format(sep, sep,
sep, sep,
sep))
output_file.write("{}{}{}unique:{}total{}unique{}total:\n".format(sep, sep, sep, sep, sep, sep))
output_file.write("SSCS ab{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataAB), sep, sum(dataAB_o), sep,
float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)),
sep, float(len(dataAB)) / (len(ab) + len(ba)), sep, float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
sep, float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o))))
output_file.write("SSCS ba{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataBA), sep, sum(dataBA_o), sep,
float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)),
sep, float(len(dataBA)) / (len(ab) + len(ba)), sep,
float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
sep, float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o))))
output_file.write(
"DCS (total){}{} ({}){}{} ({}){}{:.3f}{}{:.3f} ({:.3f}){}{:.3f}{}{:.3f} ({:.3f})\n".format(
sep, len(duplTags), len(duplTags_double), sep, sum(duplTags_o), sum(duplTags_double_o), sep,
float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)), sep,
float(len(duplTags)) / (len(ab) + len(ba)), float(len(duplTags_double)) / (len(ab) + len(ba)), sep,
float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep,
float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o))))
output_file.write("total nr. of tags{}{}{}{}{}{}{}{}{}{}{}{}\n".format(
sep, (len(dataAB) + len(dataBA) + len(duplTags)), sep,
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep,
(len(dataAB) + len(dataBA) + len(duplTags)), sep, (len(ab) + len(ba)), sep,
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep, (sum(ab_o) + sum(ba_o))))
# information for FS >= 3
output_file.write(
"\nFS >= 3{}nr. of tags{}nr. of PE reads{}rel. freq of tags{}{}rel. freq of PE reads:\n".format(sep,
sep,
sep,
sep,
sep))
output_file.write("{}{}{}unique:{}total{}unique{}total:\n".format(sep, sep, sep, sep, sep, sep))
output_file.write("SSCS ab{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataAB_FS3), sep, sum(dataAB_FS3_o), sep,
float(len(dataAB_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
float(len(dataAB_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
sep, float(sum(dataAB_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
sep, float(sum(dataAB_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
output_file.write("SSCS ba{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataBA_FS3), sep, sum(dataBA_FS3_o), sep,
        float(len(dataBA_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)),
        sep, float(len(dataBA_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
        sep, float(sum(dataBA_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
        sep, float(sum(dataBA_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
output_file.write(
"DCS (total){}{} ({}){}{} ({}){}{:.3f}{}{:.3f} ({:.3f}){}{:.3f}{}{:.3f} ({:.3f})\n".format(
sep, len(duplTags_FS3), duplTags_double_FS3, sep, sum(duplTags_FS3_o), duplTags_double_FS3_o, sep,
float(len(duplTags_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
float(len(duplTags_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
float(duplTags_double_FS3) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
sep, float(sum(duplTags_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
sep,
float(sum(duplTags_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o),
float(duplTags_double_FS3_o) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
output_file.write("total nr. of tags{}{}{}{}{}{}{}{}{}{}{}{}\n".format(
sep, (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
(sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
sep, (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
(len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
sep, (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)), sep,
(sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
counts = [numpy.bincount(dk, minlength=22)[1:] for dk in list1] # original counts of family sizes
output_file.write("\nValues from family size distribution based on families\n")
output_file.write("{}duplex{}ab{}ba{}sum\n".format(sep, sep, sep, sep))
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
for n in range(3):
output_file.write("{}{}".format(int(counts[n][j]), sep))
output_file.write("{}\n".format(counts[0][j] + counts[1][j] + counts[2][j]))
j += 1
output_file.write("sum{}".format(sep))
for i in counts:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("{}\n".format(sum(counts[0] + counts[1] + counts[2])))
output_file.write("\nValues from family size distribution based on PE reads\n")
output_file.write("{}duplex{}ab{}ba{}sum\n".format(sep, sep, sep, sep))
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
for n in range(3):
output_file.write("{}{}".format(int(reads[n][j]), sep))
output_file.write("{}\n".format(reads[0][j] + reads[1][j] + reads[2][j]))
j += 1
output_file.write("sum{}".format(sep))
for i in reads:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("{}\n".format(sum(reads[0] + reads[1] + reads[2])))
print("Files successfully created!")
if __name__ == '__main__':
sys.exit(compare_read_families(sys.argv))
| mit |
hitszxp/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
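# For the 1797-sample digits set loaded below this yields X of shape (8985, 64)
# and Y of shape (8985,), i.e. the original images plus 4 shifted copies.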
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
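# A minimal sketch of that grid search (illustrative only; the parameter ranges
# below are assumptions, not the values that were actually searched):
#
# from sklearn.grid_search import GridSearchCV
# param_grid = {'rbm__learning_rate': [0.01, 0.06, 0.1],
#               'rbm__n_components': [50, 100, 200],
#               'logistic__C': [1000.0, 6000.0, 10000.0]}
# search = GridSearchCV(classifier, param_grid, verbose=1)
# search.fit(X_train, Y_train)
# print(search.best_params_)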
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
Gorbagzog/StageIAP | Poubelle/JeanCatalog2.py | 1 | 1651 |
# coding: utf-8
get_ipython().magic('matplotlib osx')
import pyfits
import numpy as np
import matplotlib.pyplot as plt
import scipy
from astropy.cosmology import Planck15 as cosmo
def comov_volume(omega_sample, zmin, zmax):
"Compute the comoving volume between two redshifts in a solid angle."
V = omega_sample/41253*(cosmo.comoving_volume(zmax)-cosmo.comoving_volume(
zmin))
return V
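# Illustrative usage (the numbers match the redshift bin and survey area used below):
# V_example = comov_volume(1.2, 0.3, 0.7) # astropy Quantity in Mpc^3
# print(V_example)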
"""Load Jean's Catalog"""
hdulist = pyfits.open('../Data/COSMOS2015_clustering_v2.0_clean.fits')
tdata = hdulist[1].data
hdulist.close()
tdata = tdata[tdata['photo_z']<99]
tdata = tdata[tdata['clean']>0]
tdata = tdata[tdata['mstar_cosmo']>7.2]
#Redshift selection
zmin = 0.3
zmax=0.7
zbin = tdata[tdata['photo_z']>zmin]
zbin = zbin[zbin['photo_z']<zmax]
n = 100 #number of mass bins for our graph
mmin = zbin['mstar_cosmo'].min()
mmax = zbin['mstar_cosmo'].max()
step = (mmax-mmin)/n #resolution
omega_sample = 1.2
V = comov_volume(omega_sample, zmin, zmax)
zmoy = np.average(zbin['photo_z']) #We take the average z to compute h
h = cosmo.H(zmoy)/(100*cosmo.H0.unit)
V_corr = V*h*h*h
V_corr = V_corr.value
N = np.empty(n)
for i in range(n):
"Compute the number of galaxies more massive than m for each mass bin"
N[i] = np.sum(zbin['mstar_cosmo']>(mmin+step*i))
N = N / (V*h*h*h)
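# N now holds the cumulative number density n(>M*) in h^3 Mpc^-3: the comoving
# volume was converted to (Mpc/h)^3 above, matching the y-axis label of the plot below.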
#plot
fig, ax = plt.subplots()
ax.plot(np.linspace(mmin, mmax, num=n)*h,N)
ax.set_title('Abundance for Jean\'s catalog')
ax.set_ylabel('N(>M*), $h^{3}.Mpc^{-3}$', size=14)
ax.set_xlabel('Mass, $log(M_{\odot}/h)$', size=14)
plt.show()
# a = 1/(1+zmoy)
# print('The mean redshift is '+str(zmoy)+', so the scale factor is '+str(a))
| gpl-3.0 |
ishank08/scikit-learn | sklearn/linear_model/tests/test_omp.py | 76 | 7752 | # Author: Vlad Niculae
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
bryanwweber/UConnRCMPy | docs/conf.py | 1 | 10603 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# UConnRCMPy documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 9 10:26:33 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import pkg_resources
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
on_travis = os.environ.get('TRAVIS') == 'True'
if not on_travis:
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
]
# add_function_parentheses = False
autodoc_default_flags = ['members']
# autodoc_member_order = 'bysource'
autoclass_content = 'class'
napoleon_numpy_docstring = True
napoleon_google_docstring = False
# napoleon_use_ivar = True
intersphinx_mapping = {'python': ('https://docs.python.org/3.6', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.org/2.0.1/', None),
'cantera': ('http://cantera.github.io/docs/sphinx/html/', None),
}
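# With these mappings, reST in the docstrings can cross-reference external
# projects, e.g. (illustrative) :class:`numpy.ndarray` or :class:`cantera.Solution`
# resolve to the NumPy and Cantera documentation respectively.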
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'UConnRCMPy'
author = 'Bryan W. Weber'
this_year = datetime.date.today().year
copyright = '{}, {}'.format(this_year, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
try:
release = pkg_resources.get_distribution(project).version
except Exception:
release = 'unknown'
# The short X.Y version.
version = '.'.join(release.split('.')[:1])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'bryanwweber',
'github_repo': 'uconnrcmpy',
'github_banner': True,
'github_button': True,
'show_powered_by': True,
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'UConnRCMPydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'UConnRCMPy.tex', 'UConnRCMPy Documentation',
'Bryan W. Weber', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'uconnrcmpy', 'UConnRCMPy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'UConnRCMPy', 'UConnRCMPy Documentation',
author, 'UConnRCMPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| bsd-3-clause |
carolinarias/Sensing-the-City | data_visualization/web/radialboxplots/cumulativedata.py | 1 | 4503 | #!/usr/bin/env python
# This script generates the CSV with cumulative data to be plotted in the radial box plot
from monary import Monary
from datetime import datetime, timedelta
from pprint import pprint
from pymongo import MongoClient
import sys
import numpy as np
import numpy.ma as ma
import json
import collections
import time
import glob
import struct
import pandas as pd
start = datetime.strptime('2013-11-01', '%Y-%m-%d') # start from the first complete day
#stop = datetime.strptime('2013-11-08', '%Y-%m-%d') # end of the last complete day
#start = datetime.strptime('2013-11-08', '%Y-%m-%d') # start from the first complete day
#stop = datetime.strptime('2013-11-15', '%Y-%m-%d') # end of the last complete day
#start = datetime.strptime('2013-11-15', '%Y-%m-%d') # start from the first complete day
#stop = datetime.strptime('2013-11-22', '%Y-%m-%d') # end of the last complete day
#start = datetime.strptime('2013-11-22', '%Y-%m-%d') # start from the first complete day
#stop = datetime.strptime('2013-11-29', '%Y-%m-%d') # end of the last complete day
#start = datetime.strptime('2013-11-29', '%Y-%m-%d') # start from the first complete day
#stop = datetime.strptime('2013-12-06', '%Y-%m-%d') # end of the last complete day
#start = datetime.strptime('2013-12-06', '%Y-%m-%d') # start from the first complete day
#stop = datetime.strptime('2013-12-13', '%Y-%m-%d') # end of the last complete day
#start = datetime.strptime('2013-12-13', '%Y-%m-%d') # start from the first complete day
#stop = datetime.strptime('2013-12-20', '%Y-%m-%d') # end of the last complete day
#start = datetime.strptime('2013-12-20', '%Y-%m-%d') # start from the first complete day
#stop = datetime.strptime('2013-12-27', '%Y-%m-%d') # end of the last complete day
#start = datetime.strptime('2013-12-25', '%Y-%m-%d') # start from the first complete day
stop = datetime.strptime('2014-01-01', '%Y-%m-%d') # end of the last complete day
#stop = datetime.strptime('2014-01-01', '%Y-%m-%d') # end of the last complete day
print start
startm = str(start.month)
startd = str(start.day)
print stop
stopm = str(stop.month)
stopd = str(stop.day)
print '-------------------------'
## DATABASE CONNECTION USING MONARY
client = Monary('localhost', 27017)
obj = {}
while (start < stop) :
# pipeline = [{"$match": {"ts" : start} },{"$unwind" : "$data_mi"},{"$group" : {"_id" : "$cellId", "smsin" : {"$sum" : "$data_mi.sms_in"}}}] # only sms_in values
pipeline = [{"$match": {"ts" : start} },{"$unwind" : "$data_mi"},{"$group" : {"_id" : "$cellId", "smsin" : {"$sum" : "$data_mi.sms_in"}, "smsout" : {"$sum" : "$data_mi.sms_out"}, "callin" : {"$sum" : "$data_mi.call_in"}, "callout" : {"$sum" : "$data_mi.call_out"}, "inttraf" : {"$sum" : "$data_mi.int_traf"}}}]
obj[start] = client.aggregate(
"tlc", # database name
"data", # collection name for real data
pipeline, # query agregate pipeline for while cicle
# TODO use this for all the information set
["_id", "smsin", "smsout", "callin", "callout", "inttraf"], # field names (in Mongo record)
["int32", "float64", "float64", "float64", "float64", "float64"] # Monary field types
# TODO use this for only smsin subset
# ["_id", "smsin"], # field names (in Mongo record)
# ["int32", "float64"] # Monary field types
)
start = start + timedelta(minutes = 10)
smsin_obj = {}
smsout_obj = {}
callin_obj = {}
callout_obj = {}
inttraf_obj = {}
series = pd.Series(obj)
d = {}
d["timestamp"] = []
d["smsin"] = []
d["smsout"] = []
d["callin"] = []
d["callout"] = []
d["inttraf"] = []
#pprint(series)
for ts, array in series.iteritems():
    # pprint(array[1].data) # vector of values with length equal to the number of cells
sum_smsin = sum(array[1].data)
sum_smsout = sum(array[2].data)
sum_callin = sum(array[3].data)
sum_callout = sum(array[4].data)
sum_inttraf = sum(array[5].data)
# print(sum(array[1].data))
# print type(sum_values)
d["timestamp"] += [ts]
d["smsin"] += [sum_smsin]
d["smsout"] += [sum_smsout]
d["callin"] += [sum_callin]
d["callout"] += [sum_callout]
d["inttraf"] += [sum_inttraf]
df_all = pd.DataFrame(d)
print df_all
df_all.to_csv('../csv_graph/cumulative_'+startm+'-'+startd+'_'+stopm+'-'+stopd+'.csv', sep=',', index_label='index', encoding='utf-8')
# NOTE: the labels in the d dict and index_label='index' must match the names used on the JavaScript side
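# With this Python 2 / older-pandas setup the dict columns are likely written in
# alphabetical order, so the header would read something like (illustrative):
# index,callin,callout,inttraf,smsin,smsout,timestamp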
| mit |
nealbob/nealbob.github.io | code/MLdemo.py | 1 | 5234 | import econlearn
import numpy as np
import sklearn.linear_model
from matplotlib import pyplot as plt
X = np.random.rand(100)
Y = np.maximum(X - 0.5, 0) + np.random.normal(scale=0.1, size=100)
Xt = np.arange(min(X), max(X), 0.001)
Nt = Xt.shape[0]
folder = '/home/nealbob/Dropbox/Presentations/ML/'
def plot_line(X, Y, Xt, Yt, name):
plt.scatter(X, Y, marker="+")
plt.xlim(0,1)
plt.ylim(-0.2,0.8)
Y_act = np.maximum(Xt - 0.5, 0)
plt.plot(Xt, Y_act, color="blue", linewidth="0.3")
plt.plot(Xt, Yt, color="g", linewidth="2.5")
plt.savefig(folder + name, dpi=200)
plt.show()
plt.scatter(X, Y, marker="+")
plt.xlim(0,1)
plt.ylim(-0.2,0.8)
plt.savefig(folder + "data.png", dpi=200)
plt.show()
plt.scatter(X, Y, marker="+")
plt.xlim(0,1)
plt.ylim(-0.2,0.8)
Yt = np.maximum(Xt - 0.5, 0)
plt.plot(Xt, Yt, color="blue", linewidth="0.3")
plt.savefig(folder + "datamodel.png", dpi=200)
plt.show()
ols = sklearn.linear_model.LinearRegression(fit_intercept=True)
ols.fit(X.reshape([100,1]), Y)
Yt = ols.predict(Xt.reshape([Nt,1]))
plot_line(X, Y, Xt, Yt, 'linear.png')
tile = econlearn.TilecodeRegressor(1, [4], 1)
tile.fit(X, Y)
Yt = tile.predict(Xt)
plot_line(X, Y, Xt, Yt, 'bias.png')
tile = econlearn.TilecodeRegressor(1, [40], 1)
tile.fit(X, Y)
Yt = tile.predict(Xt)
plot_line(X, Y, Xt, Yt, 'noise.png')
Y_act = np.maximum(X - 0.5, 0)
ols.fit(Y_act.reshape([100,1]), Y)
Yt = np.maximum(Xt - 0.5, 0)
Yt = ols.predict(Yt.reshape([Nt,1]))
plot_line(X, Y, Xt, Yt, 'true.png')
R2 = np.zeros(30)
CV = np.zeros(30)
for i in range(30):
for j in range(1000):
Xtemp = np.random.rand(100)
Ytemp = np.maximum(Xtemp - 0.5, 0) + np.random.normal(scale=0.1, size=100)
Xtest = np.random.rand(1000)
Ytest = np.maximum(Xtest - 0.5, 0) + np.random.normal(scale=0.1, size=1000)
tile = econlearn.TilecodeRegressor(1, [i+1], 1)
tile.fit(Xtemp, Ytemp, score=True)
R2[i] += (tile.tile.R2)*0.001
Ytt = tile.predict(Xtest)
ss_res = np.dot((Ytest - Ytt),(Ytest - Ytt))
ymean = np.mean(Ytest)
ss_tot = np.dot((Ytest-ymean),(Ytest-ymean))
CV[i] += (1-ss_res/ss_tot)*0.001
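# CV and R2 now hold Monte-Carlo averages (over 1000 simulated datasets) of the
# out-of-sample and in-sample R^2, respectively, for each bin count.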
plt.plot(range(1,31), CV, label="out-of-sample fit")
plt.plot(range(1,31), R2, label="in-sample fit")
plt.xlabel("number of bins")
plt.ylabel("$R^2$")
plt.legend()
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.ylim(0,1)
plt.xlim(0,30)
plt.savefig(folder + 'performance.png', dpi=200)
best_bins = int(np.argmax(CV)) + 1  # bin counts on the x-axis are 1-indexed
plt.scatter(best_bins, CV[best_bins - 1])
plt.ylim(0,1)
plt.xlim(0,30)
plt.savefig(folder + 'maxperformance.png', dpi=200)
plt.show()
tile = econlearn.TilecodeRegressor(1, [best_bins], 1)
tile.fit(X, Y)
Yt = tile.predict(Xt)
plot_line(X, Y, Xt, Yt, 'good.png')
tile = econlearn.TilecodeRegressor(1, [3], 80)
tile.fit(X, Y, method="SGD", n_iters=30, eta=0.15)
Yt = tile.predict(Xt)
plot_line(X, Y, Xt, Yt, 'ensemble.png')
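# TilecodeRegressor(1, [3], 80) appears to average 80 offset 3-bin tilings fitted
# by SGD, which smooths the step-function bias that a single coarse tiling shows above.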
CV_OLS = np.zeros(30)
CV_ML = np.zeros(30)
CV_ML2 = np.zeros(30)
CV_ACT = np.zeros(30)
sample = np.zeros(30)
for i in range(30):
N = 8 + 5*i
sample[i] = N
for j in range(200):
X = np.random.rand(N)
Y = np.maximum(X - 0.5, 0) + np.random.normal(scale=0.1, size=N)
Xtest = np.random.rand(1000)
Ytest = np.maximum(Xtest - 0.5, 0) + np.random.normal(scale=0.1, size=1000)
#tile = econlearn.TilecodeRegressor(1, [8], 1)
#tile.fit(X, Y)
#Yt = tile.predict(Xtest)
#ss_res = np.dot((Ytest - Yt),(Ytest - Yt))
#ymean = np.mean(Ytest)
#ss_tot = np.dot((Ytest-ymean),(Ytest-ymean))
#CV_ML[i] += (1-ss_res/ss_tot)*(1/200.0)
tile = econlearn.TilecodeRegressor(1, [3], 80)
tile.fit(X, Y, method="SGD", n_iters=30, eta=0.15)
Yt = tile.predict(Xtest)
ss_res = np.dot((Ytest - Yt),(Ytest - Yt))
ymean = np.mean(Ytest)
ss_tot = np.dot((Ytest-ymean),(Ytest-ymean))
CV_ML2[i] += (1-ss_res/ss_tot)*(1/200.0)
ols.fit(X.reshape([N,1]), Y)
Yt = ols.predict(Xtest.reshape([1000,1]))
ss_res = np.dot((Ytest - Yt),(Ytest - Yt))
ymean = np.mean(Ytest)
ss_tot = np.dot((Ytest-ymean),(Ytest-ymean))
CV_OLS[i] += (1-ss_res/ss_tot)*(1/200.0)
Y_act = np.maximum(X - 0.5, 0)
ols.fit(Y_act.reshape([N,1]), Y)
X_act = np.maximum(Xtest - 0.5, 0)
Yt = ols.predict(X_act.reshape([1000,1]))
ss_res = np.dot((Ytest - Yt),(Ytest - Yt))
ymean = np.mean(Ytest)
ss_tot = np.dot((Ytest-ymean),(Ytest-ymean))
CV_ACT[i] += (1-ss_res/ss_tot)*(1/200.0)
plt.plot(sample, CV_OLS, color="blue", label="OLS: linear")
plt.plot(sample, CV_ACT, color="blue", linestyle="dashed", label="OLS: true")
plt.xlabel("sample size")
plt.ylabel("out-of-sample $R^2$")
plt.ylim(0,0.9)
#plt.plot(sample, CV_ML, color="green", linestyle="dashed", label="ML: step function")
plt.plot(sample, CV_ML2, color="green", label="ML")
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
plt.savefig(folder + 'samplesize2.png', dpi=200)
plt.show()
| mit |
BasuruK/sGlass | Outdoor_Object_Recognition_Engine/train_CNN.py | 1 | 7200 | from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense, Dropout
from keras.callbacks import History, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from keras.preprocessing import image as img
import datetime
import os.path
import matplotlib.pyplot as plt
import numpy as np
# Initialize the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (5, 5), input_shape = (64, 64, 3), activation = 'relu', padding='same'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 2 - Add more Convolution Layers making it Deep followed by a Pooling Layer
classifier.add(Conv2D(32, (5, 5), activation = 'relu', padding='same'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Dropout(0.25))
classifier.add(Conv2D(64, (5, 5), activation = 'relu', padding='same'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(128, (5, 5), activation = 'relu', padding='same'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Dropout(0.25))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Fully Connected Neural Network
# Hidden Layer 1 - Activation Function RELU
classifier.add(Dense(units = 512, activation = 'relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(units = 3, activation = 'softmax'))
# Compile the CNN
# Categorical Crossentropy - to classify between multiple classes of images
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Image Augmentation and Training Section
# Image augmentation to prevent overfitting (applying random transformations to the training set,
# i.e. scaling, rotating and stretching)
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True,
rotation_range=0.3,
width_shift_range=0.3,
height_shift_range=0.3
)
test_datagen = ImageDataGenerator(rescale=1./255)
# Load the training dataset folder
training_set = train_datagen.flow_from_directory(
'dataset/training_set',
target_size=(64, 64),
batch_size=105,
class_mode='categorical')
# Load the test data set folder
test_set = test_datagen.flow_from_directory(
'dataset/test_set',
target_size=(64, 64),
batch_size=105,
class_mode='categorical')
# Get the accuracy and loss data to plot the graph
history = History()
# checkpoint = ModelCheckpoint(filepath='models_backups/' + str(str(datetime.datetime.now().hour)), monitor='val_loss',
# verbose=0, mode='auto', period=1)
print(classifier.summary())
# Fit the classifier (CNN) on the training data
if not os.path.isfile('my_model.h5'):
classifier.fit_generator(
training_set,
steps_per_epoch=3000,
epochs=5,
validation_data=test_set,
validation_steps=3000,
callbacks=[history]
)
# Save the generated model to my_model.h5
classifier.save('my_model.h5')
else:
classifier = load_model('my_model.h5')
# Returns the labels for the classes according to the folder structure of the classes
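# NOTE: Keras' flow_from_directory assigns class indices alphabetically by
# sub-folder name, so these labels must follow that same order
# (e.g. {'car': 0, 'cat': 1, 'dog': 2} for this dataset layout).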
def get_labels_for_clases():
# return ['car', 'cat', 'dog', 'shoe']
return ['car', 'cat', 'dog']
# Run prediction for a single image
def predict_for_single_image(image):
# label the images according the folder structure
lables = get_labels_for_clases()
out = classifier.predict_classes(image, verbose=0)
return lables[out[0]]
# Run prediction for an image and output the per-class probabilities as percentages
def predict_probabilities_for_classes(classifier, image):
labels = get_labels_for_clases()
probabilities = classifier.predict(image)
print(probabilities)
# Expand two arrays to relevant class structure
probabilities = [(format(x * 100, '.2f') + "%") for x in probabilities[0]]
print(list(zip(labels, probabilities)))
# Plot the graphs
def plot_graphs_on_data(history):
# Plot Accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epocs')
plt.legend(['Train Data', 'Test Data'], loc='upper left')
plt.show()
# Plot Loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epocs')
plt.legend(['Train Data', 'Test Data'], loc='upper left')
plt.show()
# Preprocess an image for prediction
def preprocess_image(folder_name, file_name):
image = img.load_img(folder_name + '/' + file_name, target_size=(64, 64))
test_image = img.img_to_array(image)
test_image = np.expand_dims(test_image, axis=0)
return test_image
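# Hypothetical one-off prediction using the helpers above (the file name is an
# assumption based on the commented-out example near the end of this script):
# test_image = preprocess_image('custom_test', 'dog.1.jpg')
# print(predict_for_single_image(test_image))
# predict_probabilities_for_classes(classifier, test_image)
# Note that the training generators rescale pixels by 1/255 while preprocess_image
# does not; predictions may benefit from applying the same scaling.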
# Run custom set testing
def custom_set_accuracy_test(input_folder_name):
files_in_directory = os.listdir(input_folder_name + '/')
match_count = 0
fail_count = 0
match_and_fail_count_list = []
columns = 6
i = 0
plt.figure(figsize=(15, 15))
# for each image in the directory run prediction and display that with the image
for file_name in files_in_directory:
test_image = preprocess_image(input_folder_name, file_name)
prediction_for_image = predict_for_single_image(test_image)
# Plot the images on a graph
plt.subplot(len(files_in_directory) / columns + 1, columns, i + 1)
if file_name.split(".")[0] == prediction_for_image:
match_and_fail_count_list.append(file_name + " =======>" + " Match")
match_count += 1
# Plot Positive Images on the graph
plt.title(file_name)
plt.xlabel(prediction_for_image)
plt.imshow(plt.imread(input_folder_name + '/' + file_name))
else:
match_and_fail_count_list.append(
file_name + " =======>" + " Fail. " + "Predicted => " + prediction_for_image)
fail_count += 1
# Plot Positive Images on the graph
plt.title(file_name)
plt.xlabel(prediction_for_image)
plt.imshow(plt.imread(input_folder_name + '/' + file_name))
i += 1
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.show()
[print(x) for x in match_and_fail_count_list] # Print each item in list
custom_set_accuracy = (match_count / len(files_in_directory)) * 100
print('Total Images : ', len(files_in_directory))
print('Successes : ', match_count)
print('Failures : ', fail_count)
print("Custom Set accuracy = ", custom_set_accuracy)
# Draw the Graph for the predicted Results
# use this only after training.
# plot_graphs_on_data(history)
# image = img.load_img('custom_test/dog.1.jpg', target_size=(64, 64))
# test_image = img.img_to_array(image)
# test_image = np.expand_dims(test_image, axis=0)
# print(training_set.class_indices)
# predict_probabilities_for_classes(classifier, test_image)
custom_set_accuracy_test('custom_test')
| gpl-3.0 |
jkarnows/scikit-learn | sklearn/neighbors/base.py | 115 | 29783 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
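# (comment added) squaring the radius keeps the comparison below
# consistent with the squared distances computed above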
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
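# e.g. (illustrative) a label column ['b', 'a', 'b'] yields
# classes = ['a', 'b'] and an encoded column [1, 0, 1]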
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
fengzhyuan/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually, in the feature space, a comparison of
the results obtained with two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t distributions with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
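# Brief sketch of the model above (comment added for clarity): each
# observation x is a linear mixture of the independent sources s,
#     x = A s        (here A = [[1, 1], [0, 2]])
# PCA recovers orthogonal directions of maximal variance of x, while
# FastICA also estimates an unmixing matrix W with s_hat = W x, i.e. it
# undoes the mixing up to permutation and scaling of the sources.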
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
joshbohde/scikit-learn | examples/svm/plot_separating_hyperplane.py | 2 | 1239 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with a
linear kernel.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2,2], np.random.randn(20, 2) + [2, 2]]
Y = [0]*20 + [1]*20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0]/w[1]
xx = np.linspace(-5, 5)
yy = a*xx - (clf.intercept_[0])/w[1]
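# Derivation of the line above (comment added): the decision boundary is
#     w[0]*x + w[1]*y + b = 0,   with b = clf.intercept_[0];
# solving for y gives
#     y = -(w[0]/w[1])*x - b/w[1] = a*x - b/w[1]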
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a*xx + (b[1] - a*b[0])
b = clf.support_vectors_[-1]
yy_up = a*xx + (b[1] - a*b[0])
# plot the line, the points, and the nearest vectors to the plane
pl.set_cmap(pl.cm.Paired)
pl.plot(xx, yy, 'k-')
pl.plot(xx, yy_down, 'k--')
pl.plot(xx, yy_up, 'k--')
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
pl.scatter(X[:,0], X[:,1], c=Y)
pl.axis('tight')
pl.show()
| bsd-3-clause |
RPGOne/scikit-learn | sklearn/metrics/base.py | 46 | 4627 | """
Common code for all metrics
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
from ..exceptions import UndefinedMetricWarning as _UndefinedMetricWarning
from ..utils import deprecated
@deprecated("UndefinedMetricWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class UndefinedMetricWarning(_UndefinedMetricWarning):
pass
def _average_binary_score(binary_metric, y_true, y_score, average,
sample_weight=None):
"""Average a binary metric for multilabel classification
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
binary_metric : callable, returns shape [n_classes]
The binary metric function to use.
Returns
-------
score : float or array of shape [n_classes]
If ``average`` is not ``None``, return the averaged score; otherwise
return the score for each class.
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options:
raise ValueError('average has to be one of {0}'
''.format(average_options))
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score, sample_weight=sample_weight)
check_consistent_length(y_true, y_score, sample_weight)
y_true = check_array(y_true)
y_score = check_array(y_score)
not_average_axis = 1
score_weight = sample_weight
average_weight = None
if average == "micro":
if score_weight is not None:
score_weight = np.repeat(score_weight, y_true.shape[1])
y_true = y_true.ravel()
y_score = y_score.ravel()
elif average == 'weighted':
if score_weight is not None:
average_weight = np.sum(np.multiply(
y_true, np.reshape(score_weight, (-1, 1))), axis=0)
else:
average_weight = np.sum(y_true, axis=0)
if average_weight.sum() == 0:
return 0
elif average == 'samples':
# swap average_weight <-> score_weight
average_weight = score_weight
score_weight = None
not_average_axis = 0
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
n_classes = y_score.shape[not_average_axis]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c,
sample_weight=score_weight)
# Average the results
if average is not None:
return np.average(score, weights=average_weight)
else:
return score
| bsd-3-clause |
toobaz/pandas | pandas/tests/reshape/test_melt.py | 2 | 33227 | import numpy as np
from numpy import nan
import pytest
import pandas as pd
from pandas import DataFrame, lreshape, melt, wide_to_long
import pandas.util.testing as tm
class TestMelt:
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df["id1"] = (self.df["A"] > 0).astype(np.int64)
self.df["id2"] = (self.df["B"] > 0).astype(np.int64)
self.var_name = "var"
self.value_name = "val"
self.df1 = pd.DataFrame(
[
[1.067683, -1.110463, 0.20867],
[-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298, -0.873361],
]
)
self.df1.columns = [list("ABC"), list("abc")]
self.df1.columns.names = ["CAP", "low"]
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ["variable", "value"]
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(), melt(self.df))
tm.assert_frame_equal(
self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]),
melt(self.df, id_vars=["id1", "id2"], value_vars=["A", "B"]),
)
tm.assert_frame_equal(
self.df.melt(var_name=self.var_name, value_name=self.value_name),
melt(self.df, var_name=self.var_name, value_name=self.value_name),
)
tm.assert_frame_equal(self.df1.melt(col_level=0), melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ["variable", "value"]
result1 = self.df.melt(id_vars=["id1"])
assert result1.columns.tolist() == ["id1", "variable", "value"]
result2 = self.df.melt(id_vars=["id1", "id2"])
assert result2.columns.tolist() == ["id1", "id2", "variable", "value"]
def test_value_vars(self):
result3 = self.df.melt(id_vars=["id1", "id2"], value_vars="A")
assert len(result3) == 10
result4 = self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"])
expected4 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B")))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame(
{
("A", "a"): self.df1[("A", "a")],
"CAP": ["B"] * len(self.df1),
"low": ["b"] * len(self.df1),
"value": self.df1[("B", "b")],
},
columns=[("A", "a"), "CAP", "low", "value"],
)
result = self.df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")])
tm.assert_frame_equal(result, expected)
def test_single_vars_work_with_multiindex(self):
expected = DataFrame(
{
"A": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"CAP": {0: "B", 1: "B", 2: "B"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
)
result = self.df1.melt(["A"], ["B"], col_level=0)
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ("A", "a")
list_a = [tuple_a]
tuple_b = ("B", "b")
list_b = [tuple_b]
msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex"
for id_vars, value_vars in (
(tuple_a, list_b),
(list_a, tuple_b),
(tuple_a, tuple_b),
):
with pytest.raises(ValueError, match=msg):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ["var", "value"]
result6 = self.df.melt(id_vars=["id1"], var_name=self.var_name)
assert result6.columns.tolist() == ["id1", "var", "value"]
result7 = self.df.melt(id_vars=["id1", "id2"], var_name=self.var_name)
assert result7.columns.tolist() == ["id1", "id2", "var", "value"]
result8 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", var_name=self.var_name
)
assert result8.columns.tolist() == ["id1", "id2", "var", "value"]
result9 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=self.var_name
)
expected9 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, "value"],
)
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ["variable", "val"]
result11 = self.df.melt(id_vars=["id1"], value_name=self.value_name)
assert result11.columns.tolist() == ["id1", "variable", "val"]
result12 = self.df.melt(id_vars=["id1", "id2"], value_name=self.value_name)
assert result12.columns.tolist() == ["id1", "id2", "variable", "val"]
result13 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", value_name=self.value_name
)
assert result13.columns.tolist() == ["id1", "id2", "variable", "val"]
result14 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=self.value_name
)
expected14 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", self.value_name],
)
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name, value_name=self.value_name)
assert result15.columns.tolist() == ["var", "val"]
result16 = self.df.melt(
id_vars=["id1"], var_name=self.var_name, value_name=self.value_name
)
assert result16.columns.tolist() == ["id1", "var", "val"]
result17 = self.df.melt(
id_vars=["id1", "id2"], var_name=self.var_name, value_name=self.value_name
)
assert result17.columns.tolist() == ["id1", "id2", "var", "val"]
result18 = self.df.melt(
id_vars=["id1", "id2"],
value_vars="A",
var_name=self.var_name,
value_name=self.value_name,
)
assert result18.columns.tolist() == ["id1", "id2", "var", "val"]
result19 = self.df.melt(
id_vars=["id1", "id2"],
value_vars=["A", "B"],
var_name=self.var_name,
value_name=self.value_name,
)
expected19 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, self.value_name],
)
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = "foo"
result20 = df20.melt()
assert result20.columns.tolist() == ["foo", "value"]
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level="CAP")
assert res1.columns.tolist() == ["CAP", "value"]
assert res2.columns.tolist() == ["CAP", "value"]
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ["CAP", "low", "value"]
@pytest.mark.parametrize(
"col",
[
pd.Series(pd.date_range("2010", periods=5, tz="US/Pacific")),
pd.Series(["a", "b", "c", "a", "d"], dtype="category"),
pd.Series([0, 1, 0, 0, 0]),
],
)
def test_pandas_dtypes(self, col):
# GH 15785
df = DataFrame(
{"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col}
)
expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True)
result = melt(
df, id_vars=["klass", "col"], var_name="attribute", value_name="value"
)
expected = DataFrame(
{
0: list(range(5)) * 2,
1: pd.concat([col] * 2, ignore_index=True),
2: ["attr1"] * 5 + ["attr2"] * 5,
3: expected_value,
}
)
expected.columns = ["klass", "col", "attribute", "value"]
tm.assert_frame_equal(result, expected)
def test_melt_missing_columns_raises(self):
# GH-23575
# This test is to ensure that pandas raises an error if melting is
# attempted with column names absent from the dataframe
# Generate data
df = pd.DataFrame(np.random.randn(5, 4), columns=list("abcd"))
# Try to melt with missing `value_vars` column name
msg = "The following '{Var}' are not present in the DataFrame: {Col}"
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['C'\\]")
):
df.melt(["a", "b"], ["C", "d"])
# Try to melt with missing `id_vars` column name
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['A'\\]")):
df.melt(["A", "b"], ["c", "d"])
# Multiple missing
with pytest.raises(
KeyError,
match=msg.format(Var="id_vars", Col="\\['not_here', 'or_there'\\]"),
):
df.melt(["a", "b", "not_here", "or_there"], ["c", "d"])
# Multiindex melt fails if column is missing from multilevel melt
multi = df.copy()
multi.columns = [list("ABCD"), list("abcd")]
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['E'\\]")):
multi.melt([("E", "a")], [("B", "b")])
# Multiindex fails if column is missing from single level melt
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['F'\\]")
):
multi.melt(["A"], ["F"], col_level=0)
class TestLreshape:
def test_pairs(self):
data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [1766, 3301, 1454, 3139, 4133],
"id": [101, 102, 103, 104, 105],
"sex": ["Male", "Female", "Female", "Female", "Female"],
"visitdt1": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
],
"visitdt2": ["21jan2009", nan, "22jan2009", "31dec2008", "03feb2009"],
"visitdt3": ["05feb2009", nan, nan, "02jan2009", "15feb2009"],
"wt1": [1823, 3338, 1549, 3298, 4306],
"wt2": [2011.0, nan, 1892.0, 3338.0, 4575.0],
"wt3": [2293.0, nan, nan, 3377.0, 4805.0],
}
df = DataFrame(data)
spec = {
"visitdt": ["visitdt{i:d}".format(i=i) for i in range(1, 4)],
"wt": ["wt{i:d}".format(i=i) for i in range(1, 4)],
}
result = lreshape(df, spec)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
1454,
3139,
4133,
1766,
3139,
4133,
],
"id": [101, 102, 103, 104, 105, 101, 103, 104, 105, 101, 104, 105],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
1892.0,
3338.0,
4575.0,
2293.0,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
],
"id": [
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
nan,
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
nan,
nan,
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
nan,
1892.0,
3338.0,
4575.0,
2293.0,
nan,
nan,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
spec = {
"visitdt": ["visitdt{i:d}".format(i=i) for i in range(1, 3)],
"wt": ["wt{i:d}".format(i=i) for i in range(1, 4)],
}
msg = "All column lists must be same length"
with pytest.raises(ValueError, match=msg):
lreshape(df, spec)
class TestWideToLong:
def test_simple(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame(
{
"A1970": {0: "a", 1: "b", 2: "c"},
"A1980": {0: "d", 1: "e", 2: "f"},
"B1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A": ["a", "b", "c", "d", "e", "f"],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_stubs(self):
# GH9204
df = pd.DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
df.columns = ["id", "inc1", "inc2", "edu1", "edu2"]
stubs = ["inc", "edu"]
# TODO: unused?
df_long = pd.wide_to_long(df, stubs, i="id", j="age") # noqa
assert stubs == ["inc", "edu"]
def test_separating_character(self):
# GH14779
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame(
{
"A.1970": {0: "a", 1: "b", 2: "c"},
"A.1980": {0: "d", 1: "e", 2: "f"},
"B.1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B.1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A": ["a", "b", "c", "d", "e", "f"],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
tm.assert_frame_equal(result, expected)
def test_escapable_characters(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame(
{
"A(quarterly)1970": {0: "a", 1: "b", 2: "c"},
"A(quarterly)1980": {0: "d", 1: "e", 2: "f"},
"B(quarterly)1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B(quarterly)1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A(quarterly)": ["a", "b", "c", "d", "e", "f"],
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[
["X", "A(quarterly)", "B(quarterly)"]
]
result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_unbalanced(self):
# test that we can have a varying number of time variables
df = pd.DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": ["X1", "X1", "X2", "X2"],
"A": [1.0, 3.0, 2.0, 4.0],
"B": [5.0, np.nan, 6.0, np.nan],
"id": [0, 0, 1, 1],
"year": [2010, 2011, 2010, 2011],
}
expected = pd.DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_character_overlap(self):
# Test that we handle overlapping characters in both id_vars and value_vars
df = pd.DataFrame(
{
"A11": ["a11", "a22", "a33"],
"A12": ["a21", "a22", "a23"],
"B11": ["b11", "b12", "b13"],
"B12": ["b21", "b22", "b23"],
"BB11": [1, 2, 3],
"BB12": [4, 5, 6],
"BBBX": [91, 92, 93],
"BBBZ": [91, 92, 93],
}
)
df["id"] = df.index
expected = pd.DataFrame(
{
"BBBX": [91, 92, 93, 91, 92, 93],
"BBBZ": [91, 92, 93, 91, 92, 93],
"A": ["a11", "a22", "a33", "a21", "a22", "a23"],
"B": ["b11", "b12", "b13", "b21", "b22", "b23"],
"BB": [1, 2, 3, 4, 5, 6],
"id": [0, 1, 2, 0, 1, 2],
"year": [11, 11, 11, 12, 12, 12],
}
)
expected = expected.set_index(["id", "year"])[["BBBX", "BBBZ", "A", "B", "BB"]]
result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_invalid_separator(self):
# if an invalid separator is supplied, an empty data frame is returned
sep = "nope!"
df = pd.DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": "",
"A2010": [],
"A2011": [],
"B2010": [],
"id": [],
"year": [],
"A": [],
"B": [],
}
expected = pd.DataFrame(exp_data).astype({"year": "int"})
expected = expected.set_index(["id", "year"])[
["X", "A2010", "A2011", "B2010", "A", "B"]
]
expected.index.set_levels([0, 1], level=0, inplace=True)
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=sep)
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_num_string_disambiguation(self):
# Test that we can disambiguate number value_vars from
# string value_vars
df = pd.DataFrame(
{
"A11": ["a11", "a22", "a33"],
"A12": ["a21", "a22", "a23"],
"B11": ["b11", "b12", "b13"],
"B12": ["b21", "b22", "b23"],
"BB11": [1, 2, 3],
"BB12": [4, 5, 6],
"Arating": [91, 92, 93],
"Arating_old": [91, 92, 93],
}
)
df["id"] = df.index
expected = pd.DataFrame(
{
"Arating": [91, 92, 93, 91, 92, 93],
"Arating_old": [91, 92, 93, 91, 92, 93],
"A": ["a11", "a22", "a33", "a21", "a22", "a23"],
"B": ["b11", "b12", "b13", "b21", "b22", "b23"],
"BB": [1, 2, 3, 4, 5, 6],
"id": [0, 1, 2, 0, 1, 2],
"year": [11, 11, 11, 12, 12, 12],
}
)
expected = expected.set_index(["id", "year"])[
["Arating", "Arating_old", "A", "B", "BB"]
]
result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_invalid_suffixtype(self):
# If all stub names end with a string, but a numeric suffix is
# assumed, an empty data frame is returned
df = pd.DataFrame(
{
"Aone": [1.0, 2.0],
"Atwo": [3.0, 4.0],
"Bone": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": "",
"Aone": [],
"Atwo": [],
"Bone": [],
"id": [],
"year": [],
"A": [],
"B": [],
}
expected = pd.DataFrame(exp_data).astype({"year": "int"})
expected = expected.set_index(["id", "year"])
expected.index.set_levels([0, 1], level=0, inplace=True)
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_multiple_id_columns(self):
# Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm
df = pd.DataFrame(
{
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
}
)
expected = pd.DataFrame(
{
"ht": [
2.8,
3.4,
2.9,
3.8,
2.2,
2.9,
2.0,
3.2,
1.8,
2.8,
1.9,
2.4,
2.2,
3.3,
2.3,
3.4,
2.1,
2.9,
],
"famid": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
"birth": [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3],
"age": [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
}
)
expected = expected.set_index(["famid", "birth", "age"])[["ht"]]
result = wide_to_long(df, "ht", i=["famid", "birth"], j="age")
tm.assert_frame_equal(result, expected)
def test_non_unique_idvars(self):
# GH16382
# Raise an error if non-unique id variables (i) are passed
df = pd.DataFrame(
{"A_A1": [1, 2, 3, 4, 5], "B_B1": [1, 2, 3, 4, 5], "x": [1, 1, 1, 1, 1]}
)
msg = "the id variables need to uniquely identify each row"
with pytest.raises(ValueError, match=msg):
wide_to_long(df, ["A_A", "B_B"], i="x", j="colname")
def test_cast_j_int(self):
df = pd.DataFrame(
{
"actor_1": ["CCH Pounder", "Johnny Depp", "Christoph Waltz"],
"actor_2": ["Joel David Moore", "Orlando Bloom", "Rory Kinnear"],
"actor_fb_likes_1": [1000.0, 40000.0, 11000.0],
"actor_fb_likes_2": [936.0, 5000.0, 393.0],
"title": ["Avatar", "Pirates of the Caribbean", "Spectre"],
}
)
expected = pd.DataFrame(
{
"actor": [
"CCH Pounder",
"Johnny Depp",
"Christoph Waltz",
"Joel David Moore",
"Orlando Bloom",
"Rory Kinnear",
],
"actor_fb_likes": [1000.0, 40000.0, 11000.0, 936.0, 5000.0, 393.0],
"num": [1, 1, 1, 2, 2, 2],
"title": [
"Avatar",
"Pirates of the Caribbean",
"Spectre",
"Avatar",
"Pirates of the Caribbean",
"Spectre",
],
}
).set_index(["title", "num"])
result = wide_to_long(
df, ["actor", "actor_fb_likes"], i="title", j="num", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_identical_stubnames(self):
df = pd.DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"A": ["X1", "X2"],
}
)
msg = "stubname can't be identical to a column name"
with pytest.raises(ValueError, match=msg):
wide_to_long(df, ["A", "B"], i="A", j="colname")
def test_nonnumeric_suffix(self):
df = pd.DataFrame(
{
"treatment_placebo": [1.0, 2.0],
"treatment_test": [3.0, 4.0],
"result_placebo": [5.0, 6.0],
"A": ["X1", "X2"],
}
)
expected = pd.DataFrame(
{
"A": ["X1", "X1", "X2", "X2"],
"colname": ["placebo", "test", "placebo", "test"],
"result": [5.0, np.nan, 6.0, np.nan],
"treatment": [1.0, 3.0, 2.0, 4.0],
}
)
expected = expected.set_index(["A", "colname"])
result = wide_to_long(
df, ["result", "treatment"], i="A", j="colname", suffix="[a-z]+", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_mixed_type_suffix(self):
df = pd.DataFrame(
{
"A": ["X1", "X2"],
"result_1": [0, 9],
"result_foo": [5.0, 6.0],
"treatment_1": [1.0, 2.0],
"treatment_foo": [3.0, 4.0],
}
)
expected = pd.DataFrame(
{
"A": ["X1", "X2", "X1", "X2"],
"colname": ["1", "1", "foo", "foo"],
"result": [0.0, 9.0, 5.0, 6.0],
"treatment": [1.0, 2.0, 3.0, 4.0],
}
).set_index(["A", "colname"])
result = wide_to_long(
df, ["result", "treatment"], i="A", j="colname", suffix=".+", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_float_suffix(self):
df = pd.DataFrame(
{
"treatment_1.1": [1.0, 2.0],
"treatment_2.1": [3.0, 4.0],
"result_1.2": [5.0, 6.0],
"result_1": [0, 9],
"A": ["X1", "X2"],
}
)
expected = pd.DataFrame(
{
"A": ["X1", "X1", "X1", "X1", "X2", "X2", "X2", "X2"],
"colname": [1, 1.1, 1.2, 2.1, 1, 1.1, 1.2, 2.1],
"result": [0.0, np.nan, 5.0, np.nan, 9.0, np.nan, 6.0, np.nan],
"treatment": [np.nan, 1.0, np.nan, 3.0, np.nan, 2.0, np.nan, 4.0],
}
)
expected = expected.set_index(["A", "colname"])
result = wide_to_long(
df, ["result", "treatment"], i="A", j="colname", suffix="[0-9.]+", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_col_substring_of_stubname(self):
# GH22468
# Don't raise ValueError when a column name is a substring
# of a stubname that's been passed as a string
wide_data = {
"node_id": {0: 0, 1: 1, 2: 2, 3: 3, 4: 4},
"A": {0: 0.80, 1: 0.0, 2: 0.25, 3: 1.0, 4: 0.81},
"PA0": {0: 0.74, 1: 0.56, 2: 0.56, 3: 0.98, 4: 0.6},
"PA1": {0: 0.77, 1: 0.64, 2: 0.52, 3: 0.98, 4: 0.67},
"PA3": {0: 0.34, 1: 0.70, 2: 0.52, 3: 0.98, 4: 0.67},
}
wide_df = pd.DataFrame.from_dict(wide_data)
expected = pd.wide_to_long(
wide_df, stubnames=["PA"], i=["node_id", "A"], j="time"
)
result = pd.wide_to_long(wide_df, stubnames="PA", i=["node_id", "A"], j="time")
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
justacec/bokeh | sphinx/source/conf.py | 3 | 8374 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
#
# Bokeh documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 12 23:43:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.graphviz',
'sphinx.ext.ifconfig',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'bokeh.sphinxext.bokeh_autodoc',
'bokeh.sphinxext.bokeh_gallery',
'bokeh.sphinxext.bokeh_github',
'bokeh.sphinxext.bokeh_jinja',
'bokeh.sphinxext.bokeh_model',
'bokeh.sphinxext.bokeh_palette',
'bokeh.sphinxext.bokeh_plot',
'bokeh.sphinxext.bokeh_prop',
'bokeh.sphinxext.bokeh_sitemap',
'bokeh.sphinxext.collapsible_code_block',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bokeh'
copyright = '© Copyright 2015, Continuum Analytics.'
# Get the standard computed Bokeh version string to use for |version|
# and |release|
from bokeh import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Check for version override (e.g. when re-deploying a previously released
# docs, or when pushing test docs that do not have a corresponding BokehJS
# available on CDN)
from bokeh.settings import settings
if settings.docs_version():
version = release = settings.docs_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Sort members by type
autodoc_member_order = 'groupwise'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bokeh_theme'
html_theme_path = ['.']
MAIN_SITE = '//bokehplots.com'
html_context = {
'SITEMAP_BASE_URL': 'http://bokeh.pydata.org/en/', # Trailing slash is needed
'SITENAME': 'Bokeh Docs',
'DESCRIPTION': 'Bokeh visualization library, documentation site.',
'AUTHOR': 'Bokeh contributors',
'VERSION': version,
# Nav
'NAV': (
('About', MAIN_SITE + '/pages/about-bokeh.html'),
('Gallery', '/docs/gallery.html'),
('Docs', '//bokeh.pydata.org/en/latest/'),
('Github', '//github.com/bokeh/bokeh'),
),
# Links
'LINKS': (
('FAQs', MAIN_SITE + '/pages/faqs.html'),
('Technical vision', MAIN_SITE + '/pages/technical-vision.html'),
('Roadmap', MAIN_SITE + '/pages/roadmap.html'),
('Citation', MAIN_SITE + '/pages/citation.html'),
),
# About Links
'ABOUT': (
('About', MAIN_SITE + '/pages/about-bokeh.html'),
('Team', MAIN_SITE + '/pages/team.html'),
('Contact', MAIN_SITE + '/pages/contact.html'),
),
# Social links
'SOCIAL': (
('Contribute', MAIN_SITE + '/pages/contribute.html'),
('Mailing list', '//groups.google.com/a/continuum.io/forum/#!forum/bokeh'),
('Github', '//github.com/bokeh/bokeh'),
('Twitter', '//twitter.com/BokehPlots'),
('YouTube', '//www.youtube.com/channel/UCK0rSk29mmg4UT4bIOvPYhw')
),
# Links for the docs sub navigation
'NAV_DOCS': (
('Installation', 'installation'),
('User Guide', 'user_guide'),
('Gallery', 'gallery'),
('Reference', 'reference'),
('Releases', 'releases/%s' % version),
('Developer Guide', 'dev_guide'),
),
'ALL_VERSIONS': ['0.11.1', '0.11.0', '0.10.0', '0.9.3', '0.8.2'],
'css_server': os.environ.get('BOKEH_DOCS_CSS_SERVER', 'bokehplots.com'),
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bokehdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bokeh.tex', u'Bokeh Documentation', u'Continuum Analytics', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bokeh', u'Bokeh Documentation',
[u'Continuum Analytics'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Bokeh', u'Bokeh Documentation', u'Continuum Analytics', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# intersphinx settings
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)
}
| bsd-3-clause |
kiyoto/statsmodels | statsmodels/datasets/utils.py | 25 | 10983 | from statsmodels.compat.python import (range, StringIO, urlopen,
HTTPError, URLError, lrange,
cPickle, urljoin, BytesIO)
import sys
import shutil
from os import environ
from os import makedirs
from os.path import expanduser
from os.path import exists
from os.path import join
import numpy as np
from numpy import array
from pandas import read_csv, DataFrame, Index
def webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):
"""
Download and return an example dataset from Stata.
Parameters
----------
data : str
Name of dataset to fetch.
baseurl : str
The base URL to the stata datasets.
as_df : bool
If True, returns a `pandas.DataFrame`
Returns
-------
dta : Record Array
A record array containing the Stata dataset.
Examples
--------
>>> dta = webuse('auto')
Notes
-----
Make sure baseurl has a trailing forward slash. No error checking
is done on the response URLs.
"""
# lazy imports
from statsmodels.iolib import genfromdta
url = urljoin(baseurl, data+'.dta')
dta = urlopen(url)
dta = BytesIO(dta.read()) # make it truly file-like
if as_df: # could make this faster if we don't process dta twice?
return DataFrame.from_records(genfromdta(dta))
else:
return genfromdta(dta)
class Dataset(dict):
def __init__(self, **kw):
# define some default attributes, so pylint can find them
self.endog = None
self.exog = None
self.data = None
self.names = None
dict.__init__(self, kw)
self.__dict__ = self
# Some datasets have string variables. If you want a raw_data
# attribute you must create this in the dataset's load function.
try: # some datasets have string variables
self.raw_data = self.data.view((float, len(self.names)))
except:
pass
def __repr__(self):
return str(self.__class__)
def process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):
names = list(data.dtype.names)
if isinstance(endog_idx, int):
endog = array(data[names[endog_idx]], dtype=dtype)
endog_name = names[endog_idx]
endog_idx = [endog_idx]
else:
endog_name = [names[i] for i in endog_idx]
if stack:
endog = np.column_stack(data[field] for field in endog_name)
else:
endog = data[endog_name]
if exog_idx is None:
exog_name = [names[i] for i in range(len(names))
if i not in endog_idx]
else:
exog_name = [names[i] for i in exog_idx]
if stack:
exog = np.column_stack(data[field] for field in exog_name)
else:
exog = data[exog_name]
if dtype:
endog = endog.astype(dtype)
exog = exog.astype(dtype)
dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
endog_name=endog_name, exog_name=exog_name)
return dataset
def process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None,
index_idx=None):
data = DataFrame(data, dtype=dtype)
names = data.columns
if isinstance(endog_idx, int):
endog_name = names[endog_idx]
endog = data[endog_name]
if exog_idx is None:
exog = data.drop([endog_name], axis=1)
else:
exog = data.filter(names[exog_idx])
else:
endog = data.ix[:, endog_idx]
endog_name = list(endog.columns)
if exog_idx is None:
exog = data.drop(endog_name, axis=1)
elif isinstance(exog_idx, int):
exog = data.filter([names[exog_idx]])
else:
exog = data.filter(names[exog_idx])
if index_idx is not None: # NOTE: will have to be improved for dates
endog.index = Index(data.ix[:, index_idx])
exog.index = Index(data.ix[:, index_idx])
data = data.set_index(names[index_idx])
exog_name = list(exog.columns)
dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,
endog_name=endog_name, exog_name=exog_name)
return dataset
def _maybe_reset_index(data):
"""
All the Rdatasets have the integer row.labels from R if there is no
real index. Strip this for a zero-based index
"""
if data.index.equals(Index(lrange(1, len(data) + 1))):
data = data.reset_index(drop=True)
return data
def _get_cache(cache):
if cache is False:
# do not do any caching or load from cache
cache = None
elif cache is True: # use default dir for cache
cache = get_data_home(None)
else:
cache = get_data_home(cache)
return cache
def _cache_it(data, cache_path):
if sys.version_info[0] >= 3:
# for some reason encode("zip") won't work for me in Python 3?
import zlib
# use protocol 2 so can open with python 2.x if cached in 3.x
open(cache_path, "wb").write(zlib.compress(cPickle.dumps(data,
protocol=2)))
else:
open(cache_path, "wb").write(cPickle.dumps(data).encode("zip"))
def _open_cache(cache_path):
if sys.version_info[0] >= 3:
# NOTE: don't know why but decode('zip') doesn't work on my
# Python 3 build
import zlib
data = zlib.decompress(open(cache_path, 'rb').read())
# return as bytes object encoded in utf-8 for cross-compat of cached
data = cPickle.loads(data).encode('utf-8')
else:
data = open(cache_path, 'rb').read().decode('zip')
data = cPickle.loads(data)
return data
def _urlopen_cached(url, cache):
"""
Tries to load data from cache location otherwise downloads it. If it
downloads the data and cache is not None then it will put the downloaded
data in the cache path.
"""
from_cache = False
if cache is not None:
cache_path = join(cache,
url.split("://")[-1].replace('/', ',') + ".zip")
try:
data = _open_cache(cache_path)
from_cache = True
except:
pass
# not using the cache or didn't find it in cache
if not from_cache:
data = urlopen(url).read()
if cache is not None: # then put it in the cache
_cache_it(data, cache_path)
return data, from_cache
def _get_data(base_url, dataname, cache, extension="csv"):
url = base_url + (dataname + ".%s") % extension
try:
data, from_cache = _urlopen_cached(url, cache)
except HTTPError as err:
if '404' in str(err):
raise ValueError("Dataset %s was not found." % dataname)
else:
raise err
data = data.decode('utf-8', 'strict')
return StringIO(data), from_cache
def _get_dataset_meta(dataname, package, cache):
# get the index, you'll probably want this cached because you have
# to download info about all the data to get info about any of the data...
index_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/master/"
"datasets.csv")
data, _ = _urlopen_cached(index_url, cache)
# Python 3
if sys.version[0] == '3': # pragma: no cover
data = data.decode('utf-8', 'strict')
index = read_csv(StringIO(data))
idx = np.logical_and(index.Item == dataname, index.Package == package)
dataset_meta = index.ix[idx]
return dataset_meta["Title"].item()
def get_rdataset(dataname, package="datasets", cache=False):
"""download and return R dataset
Parameters
----------
dataname : str
The name of the dataset you want to download
package : str
The package in which the dataset is found. The default is the core
'datasets' package.
cache : bool or str
If True, will download this data into the STATSMODELS_DATA folder.
The default location is a folder called statsmodels_data in the
user home folder. Otherwise, you can specify a path to a folder to
use for caching the data. If False, the data will not be cached.
Returns
-------
dataset : Dataset instance
A `statsmodels.data.utils.Dataset` instance. This object has
attributes::
* data - A pandas DataFrame containing the data
* title - The dataset title
* package - The package from which the data came
* from_cache - Whether not cached data was retrieved
* __doc__ - The verbatim R documentation.
Notes
-----
If the R dataset has an integer index, it is reset to be zero-based.
Otherwise the index is preserved. The caching facilities are dumb. That
is, no download dates, e-tags, or otherwise identifying information
is checked to see if the data should be downloaded again or not. If the
dataset is in the cache, it's used.
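Examples
--------
A minimal sketch of typical usage; the dataset and package names below
are only illustrative (any entry in the Rdatasets index works).
>>> duncan = get_rdataset("Duncan", "car")
>>> duncan.data.head()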
"""
# NOTE: use raw github bc html site might not be most up to date
data_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
"master/csv/"+package+"/")
docs_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
"master/doc/"+package+"/rst/")
cache = _get_cache(cache)
data, from_cache = _get_data(data_base_url, dataname, cache)
data = read_csv(data, index_col=0)
data = _maybe_reset_index(data)
title = _get_dataset_meta(dataname, package, cache)
doc, _ = _get_data(docs_base_url, dataname, cache, "rst")
return Dataset(data=data, __doc__=doc.read(), package=package, title=title,
from_cache=from_cache)
# The below function were taken from sklearn
def get_data_home(data_home=None):
"""Return the path of the statsmodels data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'statsmodels_data'
in the user home folder.
Alternatively, it can be set by the 'STATSMODELS_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('STATSMODELS_DATA',
join('~', 'statsmodels_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def check_internet():
"""Check if internet is available"""
try:
urlopen("https://github.com")
except URLError as err:
return False
return True
| bsd-3-clause |
uqyge/combustionML | som/som.py | 1 | 9210 | import tensorflow as tf
import numpy as np
class SOM(object):
"""
2-D Self-Organizing Map with Gaussian Neighbourhood function
and linearly decreasing learning rate.
"""
#To check if the SOM has been trained
_trained = False
def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):
"""
Initializes all necessary components of the TensorFlow
Graph.
m X n are the dimensions of the SOM. 'n_iterations' should
be an integer denoting the number of iterations undergone
while training.
'dim' is the dimensionality of the training inputs.
'alpha' is a number denoting the initial time (iteration number) based
learning rate. Default value is 0.3.
'sigma' is the initial neighbourhood value, denoting
the radius of influence of the BMU while training. By default, it is
taken to be half of max(m, n).
"""
#Assign required variables first
self._m = m
self._n = n
if alpha is None:
alpha = 0.3
else:
alpha = float(alpha)
if sigma is None:
sigma = max(m, n) / 2.0
else:
sigma = float(sigma)
self._n_iterations = abs(int(n_iterations))
##INITIALIZE GRAPH
self._graph = tf.Graph()
##POPULATE GRAPH WITH NECESSARY COMPONENTS
with self._graph.as_default():
##VARIABLES AND CONSTANT OPS FOR DATA STORAGE
#Randomly initialized weightage vectors for all neurons,
#stored together as a matrix Variable of size [m*n, dim]
self._weightage_vects = tf.Variable(tf.random_normal(
[m*n, dim]))
#Matrix of size [m*n, 2] for SOM grid locations
#of neurons
self._location_vects = tf.constant(np.array(
list(self._neuron_locations(m, n))))
##PLACEHOLDERS FOR TRAINING INPUTS
#We need to assign them as attributes to self, since they
#will be fed in during training
#The training vector
self._vect_input = tf.placeholder(tf.float32, [dim])
#Iteration number
self._iter_input = tf.placeholder(tf.float32)
##CONSTRUCT TRAINING OP PIECE BY PIECE
#Only the final, 'root' training op needs to be assigned as
#an attribute to self, since all the rest will be executed
#automatically during training
#To compute the Best Matching Unit given a vector
#Basically calculates the Euclidean distance between every
#neuron's weightage vector and the input, and returns the
#index of the neuron which gives the least value
bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(
tf.pow(tf.subtract(self._weightage_vects, tf.stack(
[self._vect_input for i in range(m*n)])), 2), 1)),
0)
#This will extract the location of the BMU based on the BMU's
#index
slice_input = tf.pad(tf.reshape(bmu_index, [1]),
np.array([[0, 1]]))
bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,
tf.constant(np.array([1, 2]))),
[2])
#To compute the alpha and sigma values based on iteration
#number
learning_rate_op = tf.subtract(1.0, tf.div(self._iter_input,
self._n_iterations))
_alpha_op = tf.multiply(alpha, learning_rate_op)
_sigma_op = tf.multiply(sigma, learning_rate_op)
#Construct the op that will generate a vector with learning
#rates for all neurons, based on iteration number and location
#wrt BMU.
bmu_distance_squares = tf.reduce_sum(tf.pow(tf.subtract(
self._location_vects, tf.stack(
[bmu_loc for i in range(m*n)])), 2), 1)
neighbourhood_func = tf.exp(tf.negative(tf.div(tf.cast(
bmu_distance_squares, "float32"), tf.pow(_sigma_op, 2))))
learning_rate_op = tf.multiply(_alpha_op, neighbourhood_func)
#Finally, the op that will use learning_rate_op to update
#the weightage vectors of all neurons based on a particular
#input
learning_rate_multiplier = tf.stack([tf.tile(tf.slice(
learning_rate_op, np.array([i]), np.array([1])), [dim])
for i in range(m*n)])
weightage_delta = tf.multiply(
learning_rate_multiplier,
tf.subtract(tf.stack([self._vect_input for i in range(m*n)]),
self._weightage_vects))
new_weightages_op = tf.add(self._weightage_vects,
weightage_delta)
self._training_op = tf.assign(self._weightage_vects,
new_weightages_op)
##INITIALIZE SESSION
self._sess = tf.Session()
##INITIALIZE VARIABLES
#init_op = tf.initialize_all_variables()
init_op = tf.global_variables_initializer()
self._sess.run(init_op)
def _neuron_locations(self, m, n):
"""
Yields one by one the 2-D locations of the individual neurons
in the SOM.
"""
#Nested iterations over both dimensions
#to generate all 2-D locations in the map
for i in range(m):
for j in range(n):
yield np.array([i, j])
def train(self, input_vects):
"""
Trains the SOM.
'input_vects' should be an iterable of 1-D NumPy arrays with
dimensionality as provided during initialization of this SOM.
Current weightage vectors for all neurons(initially random) are
taken as starting conditions for training.
"""
#Training iterations
for iter_no in range(self._n_iterations):
#Train with each vector one by one
for input_vect in input_vects:
self._sess.run(self._training_op,
feed_dict={self._vect_input: input_vect,
self._iter_input: iter_no})
print(str(iter_no)+'/'+str(self._n_iterations))
#Store a centroid grid for easy retrieval later on
centroid_grid = [[] for i in range(self._m)]
self._weightages = list(self._sess.run(self._weightage_vects))
self._locations = list(self._sess.run(self._location_vects))
for i, loc in enumerate(self._locations):
centroid_grid[loc[0]].append(self._weightages[i])
self._centroid_grid = centroid_grid
self._trained = True
def get_centroids(self):
"""
Returns a list of 'm' lists, with each inner list containing
the 'n' corresponding centroid locations as 1-D NumPy arrays.
"""
if not self._trained:
raise ValueError("SOM not trained yet")
return self._centroid_grid
def map_vects(self, input_vects):
"""
Maps each input vector to the relevant neuron in the SOM
grid.
'input_vects' should be an iterable of 1-D NumPy arrays with
dimensionality as provided during initialization of this SOM.
Returns a list of 1-D NumPy arrays containing (row, column)
info for each input vector(in the same order), corresponding
to mapped neuron.
"""
if not self._trained:
raise ValueError("SOM not trained yet")
to_return = []
for vect in input_vects:
min_index = min([i for i in range(len(self._weightages))],
key=lambda x: np.linalg.norm(vect-
self._weightages[x]))
to_return.append(self._locations[min_index])
return to_return
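# Note (added for exposition): per training step, the graph ops built in
# __init__ implement the classic SOM update w <- w + alpha(t) * h(d) * (x - w),
# where h(d) = exp(-d^2 / sigma(t)^2) is the Gaussian neighbourhood around the
# BMU and alpha(t), sigma(t) shrink linearly with the iteration number. A plain
# NumPy sketch of that decay (illustrative values only):
#
# import numpy as np
# alpha0, sigma0, n_iter, t = 0.3, 10.0, 400, 100
# rate = 1.0 - t / n_iter                       # matches learning_rate_op above
# alpha_t, sigma_t = alpha0 * rate, sigma0 * rate
# d2 = np.array([0, 1, 4, 9])                   # squared grid distances to the BMU
# neigh = alpha_t * np.exp(-d2 / sigma_t ** 2)  # per-neuron learning rates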
#For plotting the images
from matplotlib import pyplot as plt
#Training inputs for RGBcolors
colors = np.array(
[[0., 0., 0.],
[0., 0., 1.],
[0., 0., 0.5],
[0.125, 0.529, 1.0],
[0.33, 0.4, 0.67],
[0.6, 0.5, 1.0],
[0., 1., 0.],
[1., 0., 0.],
[0., 1., 1.],
[1., 0., 1.],
[1., 1., 0.],
[1., 1., 1.],
[.33, .33, .33],
[.5, .5, .5],
[.66, .66, .66]])
color_names = \
['black', 'blue', 'darkblue', 'skyblue',
'greyblue', 'lilac', 'green', 'red',
'cyan', 'violet', 'yellow', 'white',
'darkgrey', 'mediumgrey', 'lightgrey']
#Train a 20x20 SOM with 400 iterations
som = SOM(20, 20, 3, 400)
som.train(colors)
#Get output grid
image_grid = som.get_centroids()
#Map colours to their closest neurons
mapped = som.map_vects(colors)
#Plot
plt.imshow(image_grid)
plt.title('Color SOM')
for i, m in enumerate(mapped):
plt.text(m[1], m[0], color_names[i], ha='center', va='center',
bbox=dict(facecolor='white', alpha=0.5, lw=0))
plt.show() | mit |
BuzzFeedNews/zika-data | scripts/parse-colombia.py | 1 | 1215 | #!/usr/bin/env python
import pdfplumber
import pdfplumber.utils
import pandas as pd
from collections import OrderedDict
def get_column(chars, x_start, x_end, dtype):
return chars[
(chars["x0"] >= x_start) &
(chars["x0"] < x_end)
].groupby("doctop")\
.apply(pdfplumber.utils.collate_chars)\
.str.strip()\
.astype(dtype)\
.values
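# Note (added for exposition, not in the original script): each get_column call
# keeps the characters whose left edge x0 falls inside one table column, groups
# them by vertical position (doctop, i.e. one table row), collates each group
# back into a string and casts it. For example, get_column(chars, 200, 283, int)
# recovers the "samples received" column as integers, matching the x-coordinate
# bands used in parse() below.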
def parse(pdf):
chars = pd.DataFrame(pdf.chars)
data_chars = chars[
(chars["fontname"] == "QURFPK+ArialNarrow")
].sort_values(["doctop", "x0"])
counts = pd.DataFrame(OrderedDict([
("region", get_column(data_chars, 0, 200, str)),
("samples_received", get_column(data_chars, 200, 283, int)),
("samples_testable", get_column(data_chars, 283, 366, int)),
("samples_tested", get_column(data_chars, 366, 452, int)),
("samples_in_progress", get_column(data_chars, 452, 512, int)),
]))
return counts
if __name__ == "__main__":
import sys
if hasattr(sys.stdin, "buffer"):
buf = sys.stdin.buffer
else:
buf = sys.stdin
pdf = pdfplumber.load(buf)
data = parse(pdf)
data.to_csv(sys.stdout, index=False, encoding="utf-8")
| mit |
AlexRobson/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
GeraldLoeffler/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cm.py | 70 | 5385 | """
This module contains the instantiations of color mapping classes
"""
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import *
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None
"""
if name is None: name = mpl.rcParams['image.cmap']
if lut is None: lut = mpl.rcParams['image.lut']
assert(name in datad.keys())
return colors.LinearSegmentedColormap(name, datad[name], lut)
class ScalarMappable:
"""
This is a mixin class to support scalar -> RGBA mapping. Handles
normalization and colormapping
"""
def __init__(self, norm=None, cmap=None):
"""
*norm* is an instance of :class:`colors.Normalize` or one of
its subclasses, used to map luminance to 0-1. *cmap* is a
:mod:`cm` colormap instance, for example :data:`cm.jet`
"""
self.callbacksSM = cbook.CallbackRegistry((
'changed',))
if cmap is None: cmap = get_cmap()
if norm is None: norm = colors.Normalize()
self._A = None
self.norm = norm
self.cmap = cmap
self.colorbar = None
self.update_dict = {'array':False}
def set_colorbar(self, im, ax):
'set the colorbar image and axes associated with mappable'
self.colorbar = im, ax
def to_rgba(self, x, alpha=1.0, bytes=False):
'''Return a normalized rgba array corresponding to *x*. If *x*
is already an rgb array, insert *alpha*; if it is already
rgba, return it unchanged. If *bytes* is True, return rgba as
4 uint8s instead of 4 floats.
'''
try:
if x.ndim == 3:
if x.shape[2] == 3:
if x.dtype == np.uint8:
alpha = np.array(alpha*255, np.uint8)
m, n = x.shape[:2]
xx = np.empty(shape=(m,n,4), dtype = x.dtype)
xx[:,:,:3] = x
xx[:,:,3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
return xx
except AttributeError:
pass
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
set the norm limits for image scaling; if *vmin* is a length2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin)==2):
vmin, vmax = vmin
if vmin is not None: self.norm.vmin = vmin
if vmax is not None: self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap
"""
if cmap is None: cmap = get_cmap()
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None: norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
callbackSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
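# Illustrative usage sketch (added for exposition; not part of the original
# module). It assumes 'jet' is among the registered colormap names in ``datad``.
#
# sm = ScalarMappable(norm=colors.Normalize(vmin=0.0, vmax=1.0),
#                     cmap=get_cmap('jet'))
# sm.set_array(np.linspace(0.0, 1.0, 5))
# rgba = sm.to_rgba([0.0, 0.5, 1.0])   # (3, 4) float RGBA array
# sm.set_clim(0.0, 2.0)                # updates the norm and notifies listeners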
| agpl-3.0 |
saiwing-yeung/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 28 | 5563 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm does not assume any parametric form of the data
distribution and can therefore model its complex shape much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
elijah513/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
gfyoung/pandas | pandas/tests/apply/test_series_transform.py | 1 | 2526 | import numpy as np
import pytest
from pandas import Series, concat
import pandas._testing as tm
from pandas.core.base import SpecificationError
from pandas.core.groupby.base import transformation_kernels
# tshift only works on time index and is deprecated
# There is no Series.cumcount
series_kernels = [
x for x in sorted(transformation_kernels) if x not in ["tshift", "cumcount"]
]
@pytest.mark.parametrize("op", series_kernels)
def test_transform_groupby_kernel(string_series, op):
# GH 35964
args = [0.0] if op == "fillna" else []
ones = np.ones(string_series.shape[0])
expected = string_series.groupby(ones).transform(op, *args)
result = string_series.transform(op, 0, *args)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ops, names",
[
([np.sqrt], ["sqrt"]),
([np.abs, np.sqrt], ["absolute", "sqrt"]),
(np.array([np.sqrt]), ["sqrt"]),
(np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
],
)
def test_transform_listlike(string_series, ops, names):
# GH 35964
with np.errstate(all="ignore"):
expected = concat([op(string_series) for op in ops], axis=1)
expected.columns = names
result = string_series.transform(ops)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("box", [dict, Series])
def test_transform_dictlike(string_series, box):
# GH 35964
with np.errstate(all="ignore"):
expected = concat([np.sqrt(string_series), np.abs(string_series)], axis=1)
expected.columns = ["foo", "bar"]
result = string_series.transform(box({"foo": np.sqrt, "bar": np.abs}))
tm.assert_frame_equal(result, expected)
def test_transform_wont_agg(string_series):
# GH 35964
# we are trying to transform with an aggregator
msg = "Function did not transform"
with pytest.raises(ValueError, match=msg):
string_series.transform(["min", "max"])
msg = "Function did not transform"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.transform(["sqrt", "max"])
def test_transform_axis_1_raises():
# GH 35964
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
Series([1]).transform("sum", axis=1)
def test_transform_nested_renamer():
# GH 35964
match = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=match):
Series([1]).transform({"A": {"B": ["sum"]}})
| bsd-3-clause |
mac-theobio/Lab_meeting | dataviz/plotly_python_qz/main.py | 1 | 2358 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
##df = pd.read_csv("./data/full.csv")
df = pd.read_csv("./data/monthly.csv")
order = ['January', 'February', "March",'April', 'May', 'June', 'July', 'August']
color_map = dict(zip([i for i in df["theme"].unique()],px.colors.qualitative.Plotly[:9]))
theme_names = df['theme'].unique()
app.layout = html.Div(children=[
html.H1(children='Youtube Viz'),
html.Div(children='''
Use the slider below to see monthly trends.
'''),
html.H6("Monthly Theme Counts"),
dcc.Graph(id='graph_with_slider'),
dcc.Slider(
id='month-slider',
min=df['published_month'].min(),
max=df['published_month'].max(),
marks=dict(zip([i for i in range(1,10)],order)),
step=None,
value=1
),
html.Div(
children='''
''', style = {'padding':50}),
dcc.Dropdown(id='dropdown',
options=[{'label':i, 'value':i} for i in theme_names],
value= theme_names[0],
style={'width':'70%'}
),
dcc.Graph(id='graph_with_dropdown')
])
@app.callback(
Output('graph_with_dropdown', 'figure'),
[Input('dropdown', 'value')])
def update_dropdown(selected_theme):
filtered_df = df.copy()
filtered_df = filtered_df[filtered_df['theme']==selected_theme]
fig = px.bar(filtered_df, x='published_month', y='count',
color='theme',
color_discrete_map= color_map)
fig.update_yaxes(range=[0,35])
fig.update_layout(#title=go.layout.Title("Number of videos per month"),
transition_duration=500)
return fig
@app.callback(
Output('graph_with_slider', 'figure'),
[Input('month-slider', 'value')])
def update_figure(selected_month):
filtered_df = df[df['published_month']==selected_month]
fig = px.bar(filtered_df,x="theme", y="count",
color="theme",
color_discrete_map=color_map,
#color_discrete_sequence=px.colors.qualitative.T10,
#yaxis=dict(range=[0, 35]),
labels= {"count":"Count"})
fig.update_yaxes(range=[0,35])
fig.update_layout(#title=go.layout.Title("Number of videos per month"),
transition_duration=500)
return fig
if __name__ == '__main__':
app.run_server(debug=True)
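# Note (added for exposition): each @app.callback above wires one Input to one
# Output; Dash re-runs update_figure whenever the slider value changes and
# update_dropdown whenever the dropdown selection changes, routing the returned
# Plotly figure into the dcc.Graph component with the matching id.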
| gpl-3.0 |
natanielruiz/android-yolo | jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | 30 | 4777 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()}
class _FeedingFunctionsTestCase(tf.test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchFive(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {"index_placeholder": [15, 0, 1, 2, 3],
"value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundred(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 100)
expected = {"index_placeholder": list(range(0, 16)) * 6 + list(range(0, 4)),
"value_placeholder": np.arange(32).reshape([16, 2]).tolist() * 6
+ [[0, 1], [2, 3], [4, 5], [6, 7]]}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOne(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 32
expected = {"index_placeholder": [i + 96],
"a_placeholder": [32 + i],
"b_placeholder": [64 + i]}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchFive(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {"index_placeholder": [127, 96, 97, 98, 99],
"a_placeholder": [63, 32, 33, 34, 35],
"b_placeholder": [95, 64, 65, 66, 67]}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundred(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 100)
expected = {
"index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
"a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
"b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
rasbt/python-machine-learning-book | code/optional-py-scripts/ch12.py | 4 | 33098 | # Sebastian Raschka, 2015 (http://sebastianraschka.com)
# Python Machine Learning - Code Examples
#
# Chapter 12 - Training Artificial Neural Networks for Image Recognition
#
# S. Raschka. Python Machine Learning. Packt Publishing Ltd., 2015.
# GitHub Repo: https://github.com/rasbt/python-machine-learning-book
#
# License: MIT
# https://github.com/rasbt/python-machine-learning-book/blob/master/LICENSE.txt
import os
import struct
import numpy as np
from scipy.special import expit
import sys
import matplotlib.pyplot as plt
#############################################################################
print(50 * '=')
print('Obtaining the MNIST dataset')
print(50 * '-')
s = """
The MNIST dataset is publicly available at http://yann.lecun.com/exdb/mnist/
and consists of the following four parts:
- Training set images: train-images-idx3-ubyte.gz
(9.9 MB, 47 MB unzipped, 60,000 samples)
- Training set labels: train-labels-idx1-ubyte.gz
(29 KB, 60 KB unzipped, 60,000 labels)
- Test set images: t10k-images-idx3-ubyte.gz
(1.6 MB, 7.8 MB unzipped, 10,000 samples)
- Test set labels: t10k-labels-idx1-ubyte.gz
(5 KB, 10 KB unzipped, 10,000 labels)
In this section, we will only be working with a subset of MNIST, thus,
we only need to download the training set images and training set labels.
After downloading the files, I recommend unzipping the files using
the Unix/Linux gzip tool from
the terminal for efficiency, e.g., using the command
gzip *ubyte.gz -d
in your local MNIST download directory, or, using your
favorite unzipping tool if you are working with a machine
running on Microsoft Windows. The images are stored in byte form,
and using the following function, we will read them into NumPy arrays
that we will use to train our MLP.
"""
print(s)
_ = input("Please hit enter to continue.")
def load_mnist(path, kind='train'):
"""Load MNIST data from `path`"""
labels_path = os.path.join(path,
'%s-labels-idx1-ubyte' % kind)
images_path = os.path.join(path,
'%s-images-idx3-ubyte' % kind)
with open(labels_path, 'rb') as lbpath:
magic, n = struct.unpack('>II',
lbpath.read(8))
labels = np.fromfile(lbpath,
dtype=np.uint8)
with open(images_path, 'rb') as imgpath:
magic, num, rows, cols = struct.unpack(">IIII",
imgpath.read(16))
images = np.fromfile(imgpath,
dtype=np.uint8).reshape(len(labels), 784)
return images, labels
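# Note on the binary layout parsed above (the MNIST "IDX" format): the label
# file begins with two big-endian uint32 fields (magic number, item count)
# followed by one byte per label, and the image file begins with four such
# fields (magic, count, rows, cols) followed by rows*cols bytes per image,
# hence the '>II' and '>IIII' struct formats and the reshape to 784 columns.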
X_train, y_train = load_mnist('mnist', kind='train')
print('Training rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
X_test, y_test = load_mnist('mnist', kind='t10k')
print('Test rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(10):
img = X_train[y_train == i][0].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
# plt.tight_layout()
# plt.savefig('./figures/mnist_all.png', dpi=300)
plt.show()
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(25):
img = X_train[y_train == 7][i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
# plt.tight_layout()
# plt.savefig('./figures/mnist_7.png', dpi=300)
plt.show()
"""
Uncomment the following lines to optionally save the data in CSV format.
However, note that those CSV files will take up a
substantial amount of storage space:
- train_img.csv 1.1 GB (gigabytes)
- train_labels.csv 1.4 MB (megabytes)
- test_img.csv 187.0 MB
- test_labels 144 KB (kilobytes)
"""
# np.savetxt('train_img.csv', X_train, fmt='%i', delimiter=',')
# np.savetxt('train_labels.csv', y_train, fmt='%i', delimiter=',')
# X_train = np.genfromtxt('train_img.csv', dtype=int, delimiter=',')
# y_train = np.genfromtxt('train_labels.csv', dtype=int, delimiter=',')
# np.savetxt('test_img.csv', X_test, fmt='%i', delimiter=',')
# np.savetxt('test_labels.csv', y_test, fmt='%i', delimiter=',')
# X_test = np.genfromtxt('test_img.csv', dtype=int, delimiter=',')
# y_test = np.genfromtxt('test_labels.csv', dtype=int, delimiter=',')
#############################################################################
print(50 * '=')
print('Implementing a multi-layer perceptron')
print(50 * '-')
class NeuralNetMLP(object):
""" Feedforward neural network / Multi-layer perceptron classifier.
Parameters
------------
n_output : int
Number of output units, should be equal to the
number of unique class labels.
n_features : int
Number of features (dimensions) in the target dataset.
Should be equal to the number of columns in the X array.
n_hidden : int (default: 30)
Number of hidden units.
l1 : float (default: 0.0)
Lambda value for L1-regularization.
No regularization if l1=0.0 (default)
l2 : float (default: 0.0)
Lambda value for L2-regularization.
No regularization if l2=0.0 (default)
epochs : int (default: 500)
Number of passes over the training set.
eta : float (default: 0.001)
Learning rate.
alpha : float (default: 0.0)
Momentum constant. Factor multiplied with the
gradient of the previous epoch t-1 to improve
learning speed
w(t) := w(t) - (grad(t) + alpha*grad(t-1))
decrease_const : float (default: 0.0)
Decrease constant. Shrinks the learning rate
after each epoch via eta / (1 + epoch*decrease_const)
shuffle : bool (default: True)
Shuffles training data every epoch if True to prevent circles.
minibatches : int (default: 1)
Divides training data into k minibatches for efficiency.
Normal gradient descent learning if k=1 (default).
random_state : int (default: None)
Set random state for shuffling and initializing the weights.
Attributes
-----------
cost_ : list
Sum of squared errors after each epoch.
"""
def __init__(self, n_output, n_features, n_hidden=30,
l1=0.0, l2=0.0, epochs=500, eta=0.001,
alpha=0.0, decrease_const=0.0, shuffle=True,
minibatches=1, random_state=None):
np.random.seed(random_state)
self.n_output = n_output
self.n_features = n_features
self.n_hidden = n_hidden
self.w1, self.w2 = self._initialize_weights()
self.l1 = l1
self.l2 = l2
self.epochs = epochs
self.eta = eta
self.alpha = alpha
self.decrease_const = decrease_const
self.shuffle = shuffle
self.minibatches = minibatches
def _encode_labels(self, y, k):
"""Encode labels into one-hot representation
Parameters
------------
y : array, shape = [n_samples]
Target values.
Returns
-----------
onehot : array, shape = (n_labels, n_samples)
"""
onehot = np.zeros((k, y.shape[0]))
for idx, val in enumerate(y):
onehot[val, idx] = 1.0
return onehot
def _initialize_weights(self):
"""Initialize weights with small random numbers."""
w1 = np.random.uniform(-1.0, 1.0,
size=self.n_hidden*(self.n_features + 1))
w1 = w1.reshape(self.n_hidden, self.n_features + 1)
w2 = np.random.uniform(-1.0, 1.0,
size=self.n_output*(self.n_hidden + 1))
w2 = w2.reshape(self.n_output, self.n_hidden + 1)
return w1, w2
def _sigmoid(self, z):
"""Compute logistic function (sigmoid)
Uses scipy.special.expit to avoid overflow
error for very small input values z.
"""
# return 1.0 / (1.0 + np.exp(-z))
return expit(z)
def _sigmoid_gradient(self, z):
"""Compute gradient of the logistic function"""
sg = self._sigmoid(z)
return sg * (1 - sg)
def _add_bias_unit(self, X, how='column'):
"""Add bias unit (column or row of 1s) to array at index 0"""
if how == 'column':
X_new = np.ones((X.shape[0], X.shape[1]+1))
X_new[:, 1:] = X
elif how == 'row':
X_new = np.ones((X.shape[0]+1, X.shape[1]))
X_new[1:, :] = X
else:
raise AttributeError('`how` must be `column` or `row`')
return X_new
def _feedforward(self, X, w1, w2):
"""Compute feedforward step
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
----------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
z3 : array, shape = [n_output_units, n_samples]
Net input of output layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
"""
a1 = self._add_bias_unit(X, how='column')
z2 = w1.dot(a1.T)
a2 = self._sigmoid(z2)
a2 = self._add_bias_unit(a2, how='row')
z3 = w2.dot(a2)
a3 = self._sigmoid(z3)
return a1, z2, a2, z3, a3
def _L2_reg(self, lambda_, w1, w2):
"""Compute L2-regularization cost"""
return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) +
np.sum(w2[:, 1:] ** 2))
def _L1_reg(self, lambda_, w1, w2):
"""Compute L1-regularization cost"""
return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() +
np.abs(w2[:, 1:]).sum())
def _get_cost(self, y_enc, output, w1, w2):
"""Compute cost function.
Parameters
----------
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
output : array, shape = [n_output_units, n_samples]
Activation of the output layer (feedforward)
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
cost : float
Regularized cost.
"""
term1 = -y_enc * (np.log(output))
term2 = (1 - y_enc) * np.log(1 - output)
cost = np.sum(term1 - term2)
L1_term = self._L1_reg(self.l1, w1, w2)
L2_term = self._L2_reg(self.l2, w1, w2)
cost = cost + L1_term + L2_term
return cost
def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):
""" Compute gradient step using backpropagation.
Parameters
------------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
grad1 : array, shape = [n_hidden_units, n_features]
Gradient of the weight matrix w1.
grad2 : array, shape = [n_output_units, n_hidden_units]
Gradient of the weight matrix w2.
"""
# backpropagation
sigma3 = a3 - y_enc
z2 = self._add_bias_unit(z2, how='row')
sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)
sigma2 = sigma2[1:, :]
grad1 = sigma2.dot(a1)
grad2 = sigma3.dot(a2.T)
# regularize
grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))
grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))
return grad1, grad2
def predict(self, X):
"""Predict class labels
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
Returns:
----------
y_pred : array, shape = [n_samples]
Predicted class labels.
"""
if len(X.shape) != 2:
raise AttributeError('X must be a [n_samples, n_features] array.\n'
'Use X[:,None] for 1-feature classification,'
'\nor X[[i]] for 1-sample classification')
a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)
y_pred = np.argmax(z3, axis=0)
return y_pred
def fit(self, X, y, print_progress=False):
""" Learn weights from training data.
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
y : array, shape = [n_samples]
Target class labels.
print_progress : bool (default: False)
Prints progress as the number of epochs
to stderr.
Returns:
----------
self
"""
self.cost_ = []
X_data, y_data = X.copy(), y.copy()
y_enc = self._encode_labels(y, self.n_output)
delta_w1_prev = np.zeros(self.w1.shape)
delta_w2_prev = np.zeros(self.w2.shape)
for i in range(self.epochs):
# adaptive learning rate
self.eta /= (1 + self.decrease_const*i)
if print_progress:
sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs))
sys.stderr.flush()
if self.shuffle:
idx = np.random.permutation(y_data.shape[0])
X_data, y_enc = X_data[idx], y_enc[:, idx]
mini = np.array_split(range(y_data.shape[0]), self.minibatches)
for idx in mini:
# feedforward
a1, z2, a2, z3, a3 = self._feedforward(X_data[idx],
self.w1,
self.w2)
cost = self._get_cost(y_enc=y_enc[:, idx],
output=a3,
w1=self.w1,
w2=self.w2)
self.cost_.append(cost)
# compute gradient via backpropagation
grad1, grad2 = self._get_gradient(a1=a1, a2=a2,
a3=a3, z2=z2,
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2)
delta_w1, delta_w2 = self.eta * grad1, self.eta * grad2
self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))
self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))
delta_w1_prev, delta_w2_prev = delta_w1, delta_w2
return self
nn = NeuralNetMLP(n_output=10,
n_features=X_train.shape[1],
n_hidden=50,
l2=0.1,
l1=0.0,
epochs=1000,
eta=0.001,
alpha=0.001,
decrease_const=0.00001,
minibatches=50,
shuffle=True,
random_state=1)
nn.fit(X_train, y_train, print_progress=True)
plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs * 50')
# plt.tight_layout()
# plt.savefig('./figures/cost.png', dpi=300)
plt.show()
batches = np.array_split(range(len(nn.cost_)), 1000)
cost_ary = np.array(nn.cost_)
cost_avgs = [np.mean(cost_ary[i]) for i in batches]
plt.plot(range(len(cost_avgs)), cost_avgs, color='red')
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs')
# plt.tight_layout()
# plt.savefig('./figures/cost2.png', dpi=300)
plt.show()
y_train_pred = nn.predict(X_train)
if sys.version_info < (3, 0):
acc = ((np.sum(y_train == y_train_pred, axis=0)).astype('float') /
X_train.shape[0])
else:
acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
print('Training accuracy: %.2f%%' % (acc * 100))
y_test_pred = nn.predict(X_test)
if sys.version_info < (3, 0):
acc = ((np.sum(y_test == y_test_pred, axis=0)).astype('float') /
X_test.shape[0])
else:
acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
print('Test accuracy: %.2f%%' % (acc * 100))
miscl_img = X_test[y_test != y_test_pred][:25]
correct_lab = y_test[y_test != y_test_pred][:25]
miscl_lab = y_test_pred[y_test != y_test_pred][:25]
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(25):
img = miscl_img[i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[i].set_title('%d) t: %d p: %d' % (i+1, correct_lab[i], miscl_lab[i]))
ax[0].set_xticks([])
ax[0].set_yticks([])
# plt.tight_layout()
# plt.savefig('./figures/mnist_miscl.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Debugging neural networks with gradient checking')
print(50 * '-')
class MLPGradientCheck(object):
""" Feedforward neural network / Multi-layer perceptron classifier.
Parameters
------------
n_output : int
Number of output units, should be equal to the
number of unique class labels.
n_features : int
Number of features (dimensions) in the target dataset.
Should be equal to the number of columns in the X array.
n_hidden : int (default: 30)
Number of hidden units.
l1 : float (default: 0.0)
Lambda value for L1-regularization.
No regularization if l1=0.0 (default)
l2 : float (default: 0.0)
Lambda value for L2-regularization.
No regularization if l2=0.0 (default)
epochs : int (default: 500)
Number of passes over the training set.
eta : float (default: 0.001)
Learning rate.
alpha : float (default: 0.0)
Momentum constant. Factor multiplied with the
gradient of the previous epoch t-1 to improve
learning speed
w(t) := w(t) - (grad(t) + alpha*grad(t-1))
decrease_const : float (default: 0.0)
Decrease constant. Shrinks the learning rate
after each epoch via eta / (1 + epoch*decrease_const)
shuffle : bool (default: True)
Shuffles training data every epoch if True to prevent circles.
minibatches : int (default: 1)
Divides training data into k minibatches for efficiency.
Normal gradient descent learning if k=1 (default).
random_state : int (default: None)
Set random state for shuffling and initializing the weights.
Attributes
-----------
cost_ : list
Sum of squared errors after each epoch.
"""
def __init__(self, n_output, n_features, n_hidden=30,
l1=0.0, l2=0.0, epochs=500, eta=0.001,
alpha=0.0, decrease_const=0.0, shuffle=True,
minibatches=1, random_state=None):
np.random.seed(random_state)
self.n_output = n_output
self.n_features = n_features
self.n_hidden = n_hidden
self.w1, self.w2 = self._initialize_weights()
self.l1 = l1
self.l2 = l2
self.epochs = epochs
self.eta = eta
self.alpha = alpha
self.decrease_const = decrease_const
self.shuffle = shuffle
self.minibatches = minibatches
def _encode_labels(self, y, k):
"""Encode labels into one-hot representation
Parameters
------------
y : array, shape = [n_samples]
Target values.
Returns
-----------
onehot : array, shape = (n_labels, n_samples)
"""
onehot = np.zeros((k, y.shape[0]))
for idx, val in enumerate(y):
onehot[val, idx] = 1.0
return onehot
def _initialize_weights(self):
"""Initialize weights with small random numbers."""
w1 = np.random.uniform(-1.0, 1.0,
size=self.n_hidden*(self.n_features + 1))
w1 = w1.reshape(self.n_hidden, self.n_features + 1)
w2 = np.random.uniform(-1.0, 1.0,
size=self.n_output*(self.n_hidden + 1))
w2 = w2.reshape(self.n_output, self.n_hidden + 1)
return w1, w2
def _sigmoid(self, z):
"""Compute logistic function (sigmoid)
Uses scipy.special.expit to avoid overflow
error for very small input values z.
"""
# return 1.0 / (1.0 + np.exp(-z))
return expit(z)
def _sigmoid_gradient(self, z):
"""Compute gradient of the logistic function"""
sg = self._sigmoid(z)
return sg * (1 - sg)
def _add_bias_unit(self, X, how='column'):
"""Add bias unit (column or row of 1s) to array at index 0"""
if how == 'column':
X_new = np.ones((X.shape[0], X.shape[1]+1))
X_new[:, 1:] = X
elif how == 'row':
X_new = np.ones((X.shape[0]+1, X.shape[1]))
X_new[1:, :] = X
else:
raise AttributeError('`how` must be `column` or `row`')
return X_new
def _feedforward(self, X, w1, w2):
"""Compute feedforward step
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
----------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
z3 : array, shape = [n_output_units, n_samples]
Net input of output layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
"""
a1 = self._add_bias_unit(X, how='column')
z2 = w1.dot(a1.T)
a2 = self._sigmoid(z2)
a2 = self._add_bias_unit(a2, how='row')
z3 = w2.dot(a2)
a3 = self._sigmoid(z3)
return a1, z2, a2, z3, a3
def _L2_reg(self, lambda_, w1, w2):
"""Compute L2-regularization cost"""
return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) +
np.sum(w2[:, 1:] ** 2))
def _L1_reg(self, lambda_, w1, w2):
"""Compute L1-regularization cost"""
return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() +
np.abs(w2[:, 1:]).sum())
def _get_cost(self, y_enc, output, w1, w2):
"""Compute cost function.
Parameters
----------
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
output : array, shape = [n_output_units, n_samples]
Activation of the output layer (feedforward)
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
cost : float
Regularized cost.
"""
term1 = -y_enc * (np.log(output))
term2 = (1 - y_enc) * np.log(1 - output)
cost = np.sum(term1 - term2)
L1_term = self._L1_reg(self.l1, w1, w2)
L2_term = self._L2_reg(self.l2, w1, w2)
cost = cost + L1_term + L2_term
return cost
def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):
""" Compute gradient step using backpropagation.
Parameters
------------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
grad1 : array, shape = [n_hidden_units, n_features]
Gradient of the weight matrix w1.
grad2 : array, shape = [n_output_units, n_hidden_units]
Gradient of the weight matrix w2.
"""
# backpropagation
sigma3 = a3 - y_enc
z2 = self._add_bias_unit(z2, how='row')
sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)
sigma2 = sigma2[1:, :]
grad1 = sigma2.dot(a1)
grad2 = sigma3.dot(a2.T)
# regularize
grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))
grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))
return grad1, grad2
def _gradient_checking(self, X, y_enc, w1, w2, epsilon, grad1, grad2):
""" Apply gradient checking (for debugging only)
Returns
---------
relative_error : float
Relative error between the numerically
approximated gradients and the backpropagated gradients.
"""
num_grad1 = np.zeros(np.shape(w1))
epsilon_ary1 = np.zeros(np.shape(w1))
for i in range(w1.shape[0]):
for j in range(w1.shape[1]):
epsilon_ary1[i, j] = epsilon
a1, z2, a2, z3, a3 = self._feedforward(X,
w1 - epsilon_ary1, w2)
cost1 = self._get_cost(y_enc, a3, w1-epsilon_ary1, w2)
a1, z2, a2, z3, a3 = self._feedforward(X,
w1 + epsilon_ary1, w2)
cost2 = self._get_cost(y_enc, a3, w1 + epsilon_ary1, w2)
num_grad1[i, j] = (cost2 - cost1) / (2 * epsilon)
epsilon_ary1[i, j] = 0
num_grad2 = np.zeros(np.shape(w2))
epsilon_ary2 = np.zeros(np.shape(w2))
for i in range(w2.shape[0]):
for j in range(w2.shape[1]):
epsilon_ary2[i, j] = epsilon
a1, z2, a2, z3, a3 = self._feedforward(X, w1,
w2 - epsilon_ary2)
cost1 = self._get_cost(y_enc, a3, w1, w2 - epsilon_ary2)
a1, z2, a2, z3, a3 = self._feedforward(X, w1,
w2 + epsilon_ary2)
cost2 = self._get_cost(y_enc, a3, w1, w2 + epsilon_ary2)
num_grad2[i, j] = (cost2 - cost1) / (2 * epsilon)
epsilon_ary2[i, j] = 0
num_grad = np.hstack((num_grad1.flatten(), num_grad2.flatten()))
grad = np.hstack((grad1.flatten(), grad2.flatten()))
norm1 = np.linalg.norm(num_grad - grad)
norm2 = np.linalg.norm(num_grad)
norm3 = np.linalg.norm(grad)
relative_error = norm1 / (norm2 + norm3)
return relative_error
def predict(self, X):
"""Predict class labels
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
Returns:
----------
y_pred : array, shape = [n_samples]
Predicted class labels.
"""
if len(X.shape) != 2:
raise AttributeError('X must be a [n_samples, n_features] array.\n'
'Use X[:,None] for 1-feature classification,'
'\nor X[[i]] for 1-sample classification')
a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)
y_pred = np.argmax(z3, axis=0)
return y_pred
def fit(self, X, y, print_progress=False):
""" Learn weights from training data.
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
y : array, shape = [n_samples]
Target class labels.
print_progress : bool (default: False)
Prints progress as the number of epochs
to stderr.
Returns:
----------
self
"""
self.cost_ = []
X_data, y_data = X.copy(), y.copy()
y_enc = self._encode_labels(y, self.n_output)
delta_w1_prev = np.zeros(self.w1.shape)
delta_w2_prev = np.zeros(self.w2.shape)
for i in range(self.epochs):
# adaptive learning rate
self.eta /= (1 + self.decrease_const*i)
if print_progress:
sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs))
sys.stderr.flush()
if self.shuffle:
idx = np.random.permutation(y_data.shape[0])
                X_data, y_enc = X_data[idx], y_enc[:, idx]
mini = np.array_split(range(y_data.shape[0]), self.minibatches)
for idx in mini:
# feedforward
                a1, z2, a2, z3, a3 = self._feedforward(X_data[idx],
self.w1,
self.w2)
cost = self._get_cost(y_enc=y_enc[:, idx],
output=a3,
w1=self.w1,
w2=self.w2)
self.cost_.append(cost)
# compute gradient via backpropagation
grad1, grad2 = self._get_gradient(a1=a1, a2=a2,
a3=a3, z2=z2,
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2)
# start gradient checking
grad_diff = self._gradient_checking(X=X_data[idx],
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2,
epsilon=1e-5,
grad1=grad1,
grad2=grad2)
if grad_diff <= 1e-7:
print('Ok: %s' % grad_diff)
elif grad_diff <= 1e-4:
print('Warning: %s' % grad_diff)
else:
print('PROBLEM: %s' % grad_diff)
# update weights; [alpha * delta_w_prev] for momentum learning
delta_w1, delta_w2 = self.eta * grad1, self.eta * grad2
self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))
self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))
delta_w1_prev, delta_w2_prev = delta_w1, delta_w2
return self
nn_check = MLPGradientCheck(n_output=10,
n_features=X_train.shape[1],
n_hidden=10,
l2=0.0,
l1=0.0,
epochs=10,
eta=0.001,
alpha=0.0,
decrease_const=0.0,
minibatches=1,
shuffle=False,
random_state=1)
nn_check.fit(X_train[:5], y_train[:5], print_progress=False)
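# Illustrative follow-up (assumes the X_train/y_train arrays prepared earlier
# in this script): once the printed relative errors stay near 1e-7 or below,
# the analytical gradients can be trusted and the checked network can be used
# for prediction, e.g.
#   y_pred = nn_check.predict(X_train[:5])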
| mit |
davebx/tools-iuc | tools/repmatch_gff3/repmatch_gff3_util.py | 22 | 17958 | import bisect
import csv
import os
import shutil
import sys
import tempfile
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot # noqa: I202,E402
# Graph settings
Y_LABEL = 'Counts'
X_LABEL = 'Number of matched replicates'
TICK_WIDTH = 3
# Amount to shift the graph to make labels fit, [left, right, top, bottom]
ADJUST = [0.180, 0.9, 0.9, 0.1]
# Length of tick marks, use TICK_WIDTH for width
pyplot.rc('xtick.major', size=10.00)
pyplot.rc('ytick.major', size=10.00)
pyplot.rc('lines', linewidth=4.00)
pyplot.rc('axes', linewidth=3.00)
pyplot.rc('font', family='Bitstream Vera Sans', size=32.0)
COLORS = 'krb'
ISPY2 = sys.version_info[0] == 2
class Replicate(object):
def __init__(self, id, dataset_path):
self.id = id
self.dataset_path = dataset_path
if ISPY2:
fh = open(dataset_path, 'rb')
else:
fh = open(dataset_path, 'r', newline='')
self.parse(csv.reader(fh, delimiter='\t'))
def parse(self, reader):
self.chromosomes = {}
for line in reader:
if line[0].startswith("#") or line[0].startswith('"'):
continue
cname, junk, junk, mid, midplus, value, strand, junk, attrs = line
attrs = parse_gff_attrs(attrs)
distance = int(attrs['cw_distance'])
mid = int(mid)
midplus = int(midplus)
value = float(value)
if cname not in self.chromosomes:
self.chromosomes[cname] = Chromosome(cname)
chrom = self.chromosomes[cname]
chrom.add_peak(Peak(cname, mid, value, distance, self))
for chrom in self.chromosomes.values():
chrom.sort_by_index()
def filter(self, up_limit, low_limit):
for chrom in self.chromosomes.values():
chrom.filter(up_limit, low_limit)
def size(self):
return sum([len(c.peaks) for c in self.chromosomes.values()])
class Chromosome(object):
def __init__(self, name):
self.name = name
self.peaks = []
def add_peak(self, peak):
self.peaks.append(peak)
def sort_by_index(self):
self.peaks.sort(key=lambda peak: peak.midpoint)
self.keys = make_keys(self.peaks)
def remove_peak(self, peak):
i = bisect.bisect_left(self.keys, peak.midpoint)
# If the peak was actually found
if i < len(self.peaks) and self.peaks[i].midpoint == peak.midpoint:
del self.keys[i]
del self.peaks[i]
def filter(self, up_limit, low_limit):
self.peaks = [p for p in self.peaks if low_limit <= p.distance <= up_limit]
self.keys = make_keys(self.peaks)
class Peak(object):
def __init__(self, chrom, midpoint, value, distance, replicate):
self.chrom = chrom
self.value = value
self.midpoint = midpoint
self.distance = distance
self.replicate = replicate
def normalized_value(self, med):
return self.value * med / self.replicate.median
class PeakGroup(object):
def __init__(self):
self.peaks = {}
def add_peak(self, repid, peak):
self.peaks[repid] = peak
@property
def chrom(self):
return list(self.peaks.values())[0].chrom
@property
def midpoint(self):
return int(median([peak.midpoint for peak in self.peaks.values()]))
@property
def num_replicates(self):
return len(self.peaks)
@property
def median_distance(self):
return int(median([peak.distance for peak in self.peaks.values()]))
@property
def value_sum(self):
return sum([peak.value for peak in self.peaks.values()])
def normalized_value(self, med):
values = []
for peak in self.peaks.values():
values.append(peak.normalized_value(med))
return median(values)
@property
def peakpeak_distance(self):
keys = list(self.peaks.keys())
return abs(self.peaks[keys[0]].midpoint - self.peaks[keys[1]].midpoint)
class FrequencyDistribution(object):
def __init__(self, d=None):
self.dist = d or {}
def add(self, x):
self.dist[x] = self.dist.get(x, 0) + 1
def graph_series(self):
x = []
y = []
for key, val in self.dist.items():
x.append(key)
y.append(val)
return x, y
def mode(self):
return max(self.dist.items(), key=lambda data: data[1])[0]
def size(self):
return sum(self.dist.values())
def stop_err(msg):
sys.stderr.write(msg)
sys.exit(1)
def median(data):
"""
    Find the median of the data set.
"""
if not data:
return 0
sdata = sorted(data)
if len(data) % 2 == 0:
return (sdata[len(data) // 2] + sdata[len(data) // 2 - 1]) / 2
else:
return sdata[len(data) // 2]
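# For example: median([3, 1, 2]) == 2 and median([]) == 0; for even-length
# input the two middle values are averaged (true division under Python 3).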
def make_keys(peaks):
return [data.midpoint for data in peaks]
def get_window(chromosome, target_peaks, distance):
"""
Returns a window of all peaks from a replicate within a certain distance of
a peak from another replicate.
"""
lower = list(target_peaks)[0].midpoint
upper = list(target_peaks)[0].midpoint
for peak in target_peaks:
lower = min(lower, peak.midpoint - distance)
upper = max(upper, peak.midpoint + distance)
start_index = bisect.bisect_left(chromosome.keys, lower)
end_index = bisect.bisect_right(chromosome.keys, upper)
return (chromosome.peaks[start_index: end_index], chromosome.name)
def match_largest(window, peak, chrum):
if not window:
return None
if peak.chrom != chrum:
return None
return max(window, key=lambda cpeak: cpeak.value)
def match_closest(window, peak, chrum):
if not window:
return None
if peak.chrom != chrum:
return None
return min(window, key=lambda match: abs(match.midpoint - peak.midpoint))
def frequency_histogram(freqs, dataset_path, labels=[], title=''):
pyplot.clf()
pyplot.figure(figsize=(10, 10))
for i, freq in enumerate(freqs):
xvals, yvals = freq.graph_series()
# Go from high to low
xvals.reverse()
pyplot.bar([x - 0.4 + 0.8 / len(freqs) * i for x in xvals], yvals, width=0.8 / len(freqs), color=COLORS[i])
pyplot.xticks(range(min(xvals), max(xvals) + 1), map(str, reversed(range(min(xvals), max(xvals) + 1))))
pyplot.xlabel(X_LABEL)
pyplot.ylabel(Y_LABEL)
pyplot.subplots_adjust(left=ADJUST[0], right=ADJUST[1], top=ADJUST[2], bottom=ADJUST[3])
ax = pyplot.gca()
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markeredgewidth(TICK_WIDTH)
pyplot.savefig(dataset_path)
METHODS = {'closest': match_closest, 'largest': match_largest}
def gff_attrs(l):
if len(l) == 0:
return '.'
return ';'.join('%s=%s' % (tup[0], tup[1]) for tup in l)
def parse_gff_attrs(s):
d = {}
if s == '.':
return d
for item in s.split(';'):
key, val = item.split('=')
d[key] = val
return d
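# Round-trip example: gff_attrs([('cw_distance', 12), ('value_sum', 3.5)])
# yields 'cw_distance=12;value_sum=3.5', and parse_gff_attrs() on that string
# returns {'cw_distance': '12', 'value_sum': '3.5'} (values stay strings).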
def gff_row(cname, start, end, score, source, stype='.', strand='.', phase='.', attrs=None):
return (cname, source, stype, start, end, score, strand, phase, gff_attrs(attrs or []))
def get_temporary_plot_path():
"""
Return the path to a temporary file with a valid image format
file extension that can be used with bioformats.
"""
tmp_dir = tempfile.mkdtemp(prefix='tmp-repmatch-')
fd, name = tempfile.mkstemp(suffix='.pdf', dir=tmp_dir)
os.close(fd)
return name
def process_files(dataset_paths, galaxy_hids, method, distance, step, replicates, up_limit, low_limit, output_files,
output_matched_peaks, output_unmatched_peaks, output_detail, output_statistics_table, output_statistics_histogram):
output_statistics_histogram_file = output_files in ["all"] and method in ["all"]
if len(dataset_paths) < 2:
return
if method == 'all':
match_methods = METHODS.keys()
else:
match_methods = [method]
for match_method in match_methods:
statistics = perform_process(dataset_paths,
galaxy_hids,
match_method,
distance,
step,
replicates,
up_limit,
low_limit,
output_files,
output_matched_peaks,
output_unmatched_peaks,
output_detail,
output_statistics_table,
output_statistics_histogram)
if output_statistics_histogram_file:
tmp_statistics_histogram_path = get_temporary_plot_path()
frequency_histogram([stat['distribution'] for stat in [statistics]],
tmp_statistics_histogram_path,
METHODS.keys())
shutil.move(tmp_statistics_histogram_path, output_statistics_histogram)
def perform_process(dataset_paths, galaxy_hids, method, distance, step, num_required, up_limit, low_limit, output_files,
output_matched_peaks, output_unmatched_peaks, output_detail, output_statistics_table, output_statistics_histogram):
output_detail_file = output_files in ["all"] and output_detail is not None
output_statistics_table_file = output_files in ["all"] and output_statistics_table is not None
output_unmatched_peaks_file = output_files in ["all", "matched_peaks_unmatched_peaks"] and output_unmatched_peaks is not None
output_statistics_histogram_file = output_files in ["all"] and output_statistics_histogram is not None
replicates = []
for i, dataset_path in enumerate(dataset_paths):
try:
galaxy_hid = galaxy_hids[i]
r = Replicate(galaxy_hid, dataset_path)
replicates.append(r)
except Exception as e:
stop_err('Unable to parse file "%s", exception: %s' % (dataset_path, str(e)))
attrs = 'd%sr%s' % (distance, num_required)
if up_limit != 1000:
attrs += 'u%d' % up_limit
if low_limit != -1000:
attrs += 'l%d' % low_limit
if step != 0:
attrs += 's%d' % step
def td_writer(file_path):
# Returns a tab-delimited writer for a certain output
if ISPY2:
fh = open(file_path, 'wb')
return csv.writer(fh, delimiter='\t')
else:
fh = open(file_path, 'w', newline='')
return csv.writer(fh, delimiter='\t', quoting=csv.QUOTE_NONE)
labels = ('chrom',
'median midpoint',
'median midpoint+1',
'median normalized reads',
'replicates',
'median c-w distance',
'reads sum')
for replicate in replicates:
labels += ('chrom',
'median midpoint',
'median midpoint+1',
'c-w sum',
'c-w distance',
'replicate id')
matched_peaks_output = td_writer(output_matched_peaks)
if output_statistics_table_file:
statistics_table_output = td_writer(output_statistics_table)
statistics_table_output.writerow(('data', 'median read count'))
if output_detail_file:
detail_output = td_writer(output_detail)
detail_output.writerow(labels)
if output_unmatched_peaks_file:
unmatched_peaks_output = td_writer(output_unmatched_peaks)
unmatched_peaks_output.writerow(('chrom', 'midpoint', 'midpoint+1', 'c-w sum', 'c-w distance', 'replicate id'))
# Perform filtering
if up_limit < 1000 or low_limit > -1000:
for replicate in replicates:
replicate.filter(up_limit, low_limit)
# Actually merge the peaks
peak_groups = []
unmatched_peaks = []
freq = FrequencyDistribution()
def do_match(reps, distance):
# Copy list because we will mutate it, but keep replicate references.
reps = reps[:]
while len(reps) > 1:
# Iterate over each replicate as "main"
main = reps[0]
reps.remove(main)
for chromosome in list(main.chromosomes.values()):
peaks_by_value = chromosome.peaks[:]
# Sort main replicate by value
peaks_by_value.sort(key=lambda peak: -peak.value)
def search_for_matches(group):
# Here we use multiple passes, expanding the window to be
# +- distance from any previously matched peak.
while True:
new_match = False
for replicate in reps:
if replicate.id in group.peaks:
# Stop if match already found for this replicate
continue
try:
# Lines changed to remove a major bug by Rohit Reja.
window, chrum = get_window(replicate.chromosomes[chromosome.name], list(group.peaks.values()), distance)
match = METHODS[method](window, peak, chrum)
except KeyError:
continue
if match:
group.add_peak(replicate.id, match)
new_match = True
if not new_match:
break
# Attempt to enlarge existing peak groups
for group in peak_groups:
old_peaks = list(group.peaks.values())
search_for_matches(group)
for peak in list(group.peaks.values()):
if peak not in old_peaks:
peak.replicate.chromosomes[chromosome.name].remove_peak(peak)
# Attempt to find new peaks groups. For each peak in the
# main replicate, search for matches in the other replicates
for peak in peaks_by_value:
matches = PeakGroup()
matches.add_peak(main.id, peak)
search_for_matches(matches)
# Were enough replicates matched?
if matches.num_replicates >= num_required:
for peak in list(matches.peaks.values()):
peak.replicate.chromosomes[chromosome.name].remove_peak(peak)
peak_groups.append(matches)
# Zero or less = no stepping
if step <= 0:
do_match(replicates, distance)
else:
for d in range(0, distance, step):
do_match(replicates, d)
for group in peak_groups:
freq.add(group.num_replicates)
# Collect together the remaining unmatched_peaks
for replicate in replicates:
for chromosome in replicate.chromosomes.values():
for peak in chromosome.peaks:
freq.add(1)
unmatched_peaks.append(peak)
# Average the unmatched_peaks count in the graph by # replicates
med = median([peak.value for group in peak_groups for peak in group.peaks.values()])
for replicate in replicates:
replicate.median = median([peak.value for group in peak_groups for peak in group.peaks.values() if peak.replicate == replicate])
statistics_table_output.writerow((replicate.id, replicate.median))
for group in peak_groups:
# Output matched_peaks (matched pairs).
matched_peaks_output.writerow(gff_row(cname=group.chrom,
start=group.midpoint,
end=group.midpoint + 1,
score=group.normalized_value(med),
source='repmatch',
stype='.',
strand='.',
phase='.',
attrs=[('median_distance', group.median_distance),
('value_sum', group.value_sum),
('replicates', group.num_replicates)]))
if output_detail_file:
matched_peaks = (group.chrom,
group.midpoint,
group.midpoint + 1,
group.normalized_value(med),
group.num_replicates,
group.median_distance,
group.value_sum)
for peak in group.peaks.values():
matched_peaks += (peak.chrom, peak.midpoint, peak.midpoint + 1, peak.value, peak.distance, peak.replicate.id)
detail_output.writerow(matched_peaks)
if output_unmatched_peaks_file:
for unmatched_peak in unmatched_peaks:
unmatched_peaks_output.writerow((unmatched_peak.chrom,
unmatched_peak.midpoint,
unmatched_peak.midpoint + 1,
unmatched_peak.value,
unmatched_peak.distance,
unmatched_peak.replicate.id))
if output_statistics_histogram_file:
tmp_statistics_histogram_path = get_temporary_plot_path()
frequency_histogram([freq], tmp_statistics_histogram_path)
shutil.move(tmp_statistics_histogram_path, output_statistics_histogram)
return {'distribution': freq}
| mit |
rhyolight/nupic.research | projects/sp_paper/plot_noise_robustness.py | 4 | 2710 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from htmresearch.support.sp_paper_utils import *
import matplotlib as mpl
from scipy.optimize import curve_fit
mpl.rcParams['pdf.fonttype'] = 42
def nakaRushton(x, c50):
n=5
c = 1-x
y = 1/(1 + 1/(c/c50)**n)
return y
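# Equivalent closed form: y = 1 / (1 + (c50 / c)**n) with c = 1 - x, i.e. a
# Naka-Rushton saturation curve in the signal level c; c50 is the level at
# which the fitted curve reaches 0.5 (the exponent n is fixed at 5 above).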
expName = 'randomSDRVaryingSparsityContinuousLearning_seed_41'
plt.figure()
legendList = []
epochCheck = [0, 5, 10, 20, 40]
for epoch in epochCheck:
nrData = np.load \
('./results/input_output_overlap/{}/epoch_{}.npz'.format(expName, epoch))
noiseLevelList = nrData['arr_0']
inputOverlapScore = np.mean(nrData['arr_1'], 0)
outputOverlapScore = np.mean( nrData['arr_2'], 0)
plt.plot(noiseLevelList, outputOverlapScore)
popt, pcov = curve_fit(nakaRushton, noiseLevelList, outputOverlapScore,
p0=[0.5])
yfit = nakaRushton(noiseLevelList, popt)
# plt.plot(noiseLevelList, yfit, 'k--')
legendList.append('epoch {}'.format(epoch))
plt.legend(legendList)
plt.xlabel('Noise Level')
plt.ylabel('Change of SP output')
plt.savefig('./figures/noise_robustness_{}_beforeChange.pdf'.format(expName))
expName = 'randomSDRVaryingSparsityContinuousLearning_seed_41'
changeDataAt = 50
plt.figure()
legendList = []
epochCheck = [changeDataAt-1, changeDataAt, 119]
for epoch in epochCheck:
nrData = np.load(
'./results/input_output_overlap/{}/epoch_{}.npz'.format(expName, epoch))
noiseLevelList = nrData['arr_0']
inputOverlapScore = nrData['arr_1']
outputOverlapScore = np.mean(nrData['arr_2'], 0)
plt.plot(noiseLevelList, outputOverlapScore)
legendList.append('epoch {}'.format(epoch))
plt.legend(legendList)
plt.xlabel('Noise Level')
plt.ylabel('Change of SP output')
plt.savefig('./figures/noise_robustness_{}.pdf'.format(expName)) | gpl-3.0 |
WarrenWeckesser/scipy | scipy/stats/__init__.py | 3 | 12250 | """
.. _statsrefmanual:
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. currentmodule:: scipy.stats
This module contains a large number of probability distributions,
summary and frequency statistics, correlation functions and statistical
tests, masked statistics, kernel density estimation, quasi-Monte Carlo
functionality, and more.
Statistics is a very large area, and there are topics that are out of scope
for SciPy and are covered by other packages. Some of the most important ones
are:
- `statsmodels <https://www.statsmodels.org/stable/index.html>`__:
regression, linear models, time series analysis, extensions to topics
also covered by ``scipy.stats``.
- `Pandas <https://pandas.pydata.org/>`__: tabular data, time series
functionality, interfaces to other statistical languages.
- `PyMC3 <https://docs.pymc.io/>`__: Bayesian statistical
modeling, probabilistic machine learning.
- `scikit-learn <https://scikit-learn.org/>`__: classification, regression,
model selection.
- `Seaborn <https://seaborn.pydata.org/>`__: statistical data visualization.
- `rpy2 <https://rpy2.github.io/>`__: Python to R bridge.
Probability distributions
=========================
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
rv_histogram
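For example, all distributions share a common calling interface; a minimal
illustration using the standard normal::
    >>> from scipy import stats
    >>> stats.norm.cdf(0.0)
    0.5
    >>> stats.norm(loc=1.0, scale=2.0).mean()
    1.0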
Continuous distributions
------------------------
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
argus -- Argus
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr (Type III)
burr12 -- Burr (Type XII)
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
crystalball -- Crystalball
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
   f                 -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
genhyperbolic -- Generalized Hyperbolic
geninvgauss -- Generalized Inverse Gaussian
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
kappa4 -- Kappa 4 parameter
kappa3 -- Kappa 3 parameter
ksone -- Distribution of Kolmogorov-Smirnov one-sided test statistic
kstwo -- Distribution of Kolmogorov-Smirnov two-sided test statistic
kstwobign -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
laplace -- Laplace
laplace_asymmetric -- Asymmetric Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
loguniform -- Log-Uniform
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
moyal -- Moyal
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
norminvgauss -- Normal Inverse Gaussian
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
skewcauchy -- Skew Cauchy
skewnorm -- Skew normal
t -- Student's T
trapezoid -- Trapezoidal
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
--------------------------
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
matrix_normal -- Matrix normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
multinomial -- Multinomial distribution
special_ortho_group -- SO(N) group
ortho_group -- O(N) group
unitary_group -- U(N) group
random_correlation -- random correlation matrices
multivariate_t -- Multivariate t-distribution
multivariate_hypergeom -- Multivariate hypergeometric distribution
Discrete distributions
----------------------
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
betabinom -- Beta-Binomial
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
nchypergeom_fisher -- Fisher's Noncentral Hypergeometric
nchypergeom_wallenius -- Wallenius's Noncentral Hypergeometric
nhypergeom -- Negative Hypergeometric
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
yulesimon -- Yule-Simon
zipf -- Zipf (Zeta)
zipfian -- Zipfian
An overview of statistical functions is given below. Many of these functions
have a similar version in `scipy.stats.mstats` which work for masked arrays.
Summary statistics
==================
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
mode -- Modal value
moment -- Central moment
skew -- Skewness
kstat --
kstatvar --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
variation -- Coefficient of variation
find_repeats
trim_mean
gstd -- Geometric Standard Deviation
iqr
sem
bayes_mvs
mvsdist
entropy
differential_entropy
median_absolute_deviation
median_abs_deviation
Frequency statistics
====================
.. autosummary::
:toctree: generated/
cumfreq
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
Correlation functions
=====================
.. autosummary::
:toctree: generated/
f_oneway
alexandergovern
pearsonr
spearmanr
pointbiserialr
kendalltau
weightedtau
somersd
linregress
siegelslopes
theilslopes
multiscale_graphcorr
Statistical tests
=================
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
cramervonmises
cramervonmises_2samp
power_divergence
kstest
ks_1samp
ks_2samp
epps_singleton_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
brunnermunzel
combine_pvalues
jarque_bera
page_trend_test
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
binomtest
fligner
median_test
mood
skewtest
kurtosistest
normaltest
Quasi-Monte Carlo
=================
.. toctree::
:maxdepth: 4
stats.qmc
Masked statistics functions
===========================
.. toctree::
stats.mstats
Other statistical functionality
===============================
Transformations
---------------
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
yeojohnson
yeojohnson_normmax
yeojohnson_llf
obrientransform
sigmaclip
trimboth
trim1
zmap
zscore
Statistical distances
---------------------
.. autosummary::
:toctree: generated/
wasserstein_distance
energy_distance
Random variate generation
-------------------------
.. autosummary::
:toctree: generated/
rvs_ratio_uniforms
Circular statistical functions
------------------------------
.. autosummary::
:toctree: generated/
circmean
circvar
circstd
Contingency table functions
---------------------------
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.crosstab
contingency.expected_freq
contingency.margins
contingency.relative_risk
contingency.association
fisher_exact
barnard_exact
Plot-tests
----------
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
yeojohnson_normplot
Univariate and multivariate kernel density estimation
-----------------------------------------------------
.. autosummary::
:toctree: generated/
gaussian_kde
Warnings used in :mod:`scipy.stats`
-----------------------------------
.. autosummary::
:toctree: generated/
F_onewayConstantInputWarning
F_onewayBadInputSizesWarning
PearsonRConstantInputWarning
PearsonRNearConstantInputWarning
SpearmanRConstantInputWarning
"""
from .stats import *
from .distributions import *
from .morestats import *
from ._binomtest import binomtest
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from . import qmc
from ._multivariate import *
from . import contingency
from .contingency import chi2_contingency
from ._entropy import *
from ._hypotests import *
from ._rvs_sampling import rvs_ratio_uniforms
from ._page_trend_test import page_trend_test
from ._mannwhitneyu import mannwhitneyu
__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| bsd-3-clause |
yuyu2172/chainercv | examples/fpn/demo.py | 3 | 2626 | import argparse
import matplotlib.pyplot as plt
import chainer
from chainercv.datasets import coco_bbox_label_names
from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.links import FasterRCNNFPNResNet101
from chainercv.links import FasterRCNNFPNResNet50
from chainercv.links import MaskRCNNFPNResNet101
from chainercv.links import MaskRCNNFPNResNet50
from chainercv import utils
from chainercv.visualizations import vis_bbox
from chainercv.visualizations import vis_instance_segmentation
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
choices=('faster_rcnn_fpn_resnet50', 'faster_rcnn_fpn_resnet101',
'mask_rcnn_fpn_resnet50', 'mask_rcnn_fpn_resnet101'),
default='faster_rcnn_fpn_resnet50')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model')
parser.add_argument(
'--dataset', choices=('coco',), default='coco')
parser.add_argument('image')
args = parser.parse_args()
if args.model == 'faster_rcnn_fpn_resnet50':
mode = 'bbox'
cls = FasterRCNNFPNResNet50
elif args.model == 'faster_rcnn_fpn_resnet101':
mode = 'bbox'
cls = FasterRCNNFPNResNet101
elif args.model == 'mask_rcnn_fpn_resnet50':
mode = 'instance_segmentation'
cls = MaskRCNNFPNResNet50
elif args.model == 'mask_rcnn_fpn_resnet101':
mode = 'instance_segmentation'
cls = MaskRCNNFPNResNet101
if args.dataset == 'coco':
if args.pretrained_model is None:
args.pretrained_model = 'coco'
if mode == 'bbox':
label_names = coco_bbox_label_names
elif mode == 'instance_segmentation':
label_names = coco_instance_segmentation_label_names
model = cls(n_fg_class=len(label_names),
pretrained_model=args.pretrained_model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
img = utils.read_image(args.image)
if mode == 'bbox':
bboxes, labels, scores = model.predict([img])
bbox = bboxes[0]
label = labels[0]
score = scores[0]
vis_bbox(
img, bbox, label, score, label_names=label_names)
elif mode == 'instance_segmentation':
masks, labels, scores = model.predict([img])
mask = masks[0]
label = labels[0]
score = scores[0]
vis_instance_segmentation(
img, mask, label, score, label_names=label_names)
plt.show()
if __name__ == '__main__':
main()
| mit |
trnewman/VT-USRP-daughterboard-drivers | gr-utils/src/python/plot_data.py | 5 | 5834 | #
# Copyright 2007,2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_data:
def __init__(self, datatype, filenames, options):
self.hfile = list()
self.legend_text = list()
for f in filenames:
self.hfile.append(open(f, "r"))
self.legend_text.append(f)
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = datatype
self.sizeof_data = datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 9), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_f.get_xlim()
self.manager = get_current_fig_manager()
connect('key_press_event', self.click)
show()
def get_data(self, hfile):
self.text_file_pos.set_text("File Position: %d" % (hfile.tell()//self.sizeof_data))
f = scipy.fromfile(hfile, dtype=self.datatype, count=self.block_length)
#print "Read in %d items" % len(self.f)
if(len(f) == 0):
print "End of File"
else:
self.f = f
self.time = [i*(1/self.sample_rate) for i in range(len(self.f))]
def make_plots(self):
self.sp_f = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.875, 0.6])
self.sp_f.set_title(("Amplitude"), fontsize=self.title_font_size, fontweight="bold")
self.sp_f.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_f.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
self.plot_f = list()
maxval = -1e12
minval = 1e12
for hf in self.hfile:
# if specified on the command-line, set file pointer
hf.seek(self.sizeof_data*self.start, 1)
self.get_data(hf)
# Subplot for real and imaginary parts of signal
self.plot_f += plot(self.time, self.f, 'o-')
maxval = max(maxval, max(self.f))
minval = min(minval, min(self.f))
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
self.leg = self.sp_f.legend(self.plot_f, self.legend_text)
draw()
def update_plots(self):
maxval = -1e12
minval = 1e12
for hf,p in zip(self.hfile,self.plot_f):
self.get_data(hf)
p.set_data([self.time, self.f])
maxval = max(maxval, max(self.f))
minval = min(minval, min(self.f))
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.update_plots()
def step_backward(self):
for hf in self.hfile:
# Step back in file position
if(hf.tell() >= 2*self.sizeof_data*self.block_length ):
hf.seek(-2*self.sizeof_data*self.block_length, 1)
else:
hf.seek(-hf.tell(),1)
self.update_plots()
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
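# Illustrative usage sketch (hypothetical file name and options object; the
# front-end plotting scripts in gr-utils normally build the options via
# OptionParser):
#
#   class options:
#       block = 1000
#       start = 0
#       sample_rate = 1e6
#
#   dc = plot_data(scipy.float32, ["capture.dat"], options)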
| gpl-3.0 |
rcs-analytics/icy | icy/ml/prep.py | 1 | 5004 | import pandas as pd
import numpy as np
import scipy.sparse as sparse
from xgboost import DMatrix
from copy import deepcopy
from sklearn.preprocessing import StandardScaler, MinMaxScaler
def drop_nan_bulk(data, how='all', axes=[0, 1]):
for key in data:
for axis in axes:
# axis=0: rows, axis=1: cols
# how=all: drop if all values are nan, how=any: drop if any value is nan
data[key].dropna(axis=axis, how=how, inplace=True)
return data
def fill_nan_bulk(data, fill='const'):
if fill in ['mean', 'median', 'mode', 'min', 'max']:
for key in data:
data[key].fillna(-1, inplace=True)
for col in data[key].select_dtypes(include=[np.number]):
if fill == 'mean':
data[key].loc[:, col].replace(-1, data[key].loc[:, col].mean(), inplace=True)
elif fill == 'median':
data[key].loc[:, col].replace(-1, data[key].loc[:, col].median(), inplace=True)
elif fill == 'mode':
data[key].loc[:, col].replace(-1, data[key].loc[:, col].mode(), inplace=True)
elif fill == 'min':
data[key].loc[:, col].replace(-1, data[key].loc[:, col].min(), inplace=True)
elif fill == 'max':
data[key].loc[:, col].replace(-1, data[key].loc[:, col].max(), inplace=True)
elif fill == 'const':
for key in data:
data[key].fillna(-1, inplace=True)
    elif type(fill) in [int, str]:
for key in data:
data[key].fillna(fill, inplace=True)
return data
def scale_minmax_bulk(scale, data):
for key in data:
data[key] = pd.DataFrame(MinMaxScaler(scale).fit_transform(data[key]), index=data[key].index, columns=data[key].columns)
return data
def scale_std_bulk(data, mean=True, std=True):
for key in data:
data[key] = pd.DataFrame(StandardScaler(with_mean=mean, with_std=std).fit_transform(data[key]), index=data[key].index, columns=data[key].columns)
return data
def remove_duplicates_bulk(data):
for key in data:
print(key, 'rows')
data[key] = data[key].drop_duplicates() # rows
print(key, 'cols')
data[key] = data[key].T.drop_duplicates().T # columns - very slow, try ndarray.swapaxes() ?
return data
def convert_numeric_bulk(data, dtype=None, strict=False):
for k in data:
if dtype != None:
if strict:
data[k] = data[k].astype(dtype)
else:
                for col in data[k].select_dtypes(exclude=[np.number]):
data[k].loc[:, col] = data[k].loc[:, col].astype(dtype)
# for col in data[k]:
# if data[k][col].dtype not in ['bool', 'int64', 'float64']:
# data[k].loc[:, col] = data[k].loc[:, col].astype(dtype)
else:
data[k] = data[k].convert_objects(convert_dates=False, convert_numeric=True, convert_timedeltas=False)
return data
def to_dmatrix(data, labels=None):
if type(data) in [pd.DataFrame, pd.Series]:
        if labels is not None and type(labels) in [pd.DataFrame, pd.Series]:
return DMatrix(data.values, labels.values)
else:
return DMatrix(data.values)
else:
        if labels is not None:
return DMatrix(data, labels)
else:
return DMatrix(data)
def to_sparse(data, kind='csr', fill_value=0):
# add csc, coo variants?
if type(data) in [pd.DataFrame, pd.Series]:
data = data.to_sparse(fill_value)
elif type(data) == np.ndarray:
data = sparse.csr_matrix(data)
return data
def to_dense(data):
# add csc, coo variants?
if type(data) in [pd.DataFrame, pd.Series]:
data = data.to_dense()
else:
data = pd.DataFrame(data.todense())
return data
def join(data, joins):
for key1, key2, col in joins:
if type(col) == str:
if col in data[key1] or col in data[key2]:
proceed = True
else:
proceed = False
else:
for e in col:
if e in data[key1] or e in data[key2]:
proceed = True
else:
proceed = False
break
if proceed:
data[key2].columns = [key2+'_'+c for c in data[key2].columns]
try:
if type(col) == str:
right_on = key2 + '_' + col
else:
right_on = [key2 + '_' + c for c in col]
data[key1] = pd.merge(data[key1], data[key2], how='left', left_on=col, right_on=right_on, sort=False)
except KeyError:
data[key1] = data[key1].join(data[key2], on=col)
for key1, key2, col in joins:
# if col in data[key1]:
# data[key1].drop(col, axis=1, inplace=True)
if key2 in data:
del data[key2]
return data
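if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original API):
    # run the bulk NaN fill and standard scaling on a small toy frame.
    demo = {'train': pd.DataFrame({'a': [1.0, np.nan, 3.0],
                                   'b': [4.0, 5.0, np.nan]})}
    demo = fill_nan_bulk(demo, fill='mean')
    demo = scale_std_bulk(demo)
    print(demo['train'])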
| mit |
ornlneutronimaging/ResoFit | ResoFit/data/IPTS_13639/ipts_13639_In.py | 1 | 4179 | from ResoFit.calibration import Calibration
from ResoFit.fitresonance import FitResonance
import matplotlib.pyplot as plt
import numpy as np
from ResoFit._utilities import get_foil_density_gcm3
from ResoFit._utilities import Layer
import pprint
# Global parameters
energy_min = 1
energy_max = 1000
energy_step = 0.01
# Input sample name or names as str, case sensitive
layers = Layer()
# layers.add_layer(layer='Ag', thickness_mm=0.025)
# layers.add_layer(layer='Co', thickness_mm=0.025)
# layers.add_layer(layer='Hf', thickness_mm=0.025)
# layers.add_layer(layer='W', thickness_mm=0.05)
layers.add_layer(layer='In', thickness_mm=0.05)
# layers.add_layer(layer='Cd', thickness_mm=0.5)
# layers.add_layer(layer='Au', thickness_mm=0.01)
# simu = Simulation(energy_min=energy_min, energy_max=energy_max, energy_step=energy_step)
# simu.add_Layer(layer=layers)
# peak_dict = simu.peak_map(thres=0.015, min_dist=20)
# pprint.pprint(peak_dict)
folder = 'data/IPTS_13639/reso_data_13639'
data_file = 'In.csv'
spectra_file = 'spectra.csv'
image_start = 300 # Can be omitted or =None
image_end = 2730 # Can be omitted or =None
# norm_to_file = 'ob_1.csv' #'Ag.csv'
# norm_to_file = 'Ag.csv'
norm_to_file = 'ob_all.csv'
baseline = False
each_step = False
repeat = 1
source_to_detector_m = 16.126845685903064 # 16#16.445359069030175#16.447496101100739
offset_us = -12112.431834715671 # 0#2.7120797253959119#2.7355447625559037
# Calibrate the peak positions
calibration = Calibration(data_file=data_file,
spectra_file=spectra_file,
layer=layers,
energy_min=energy_min,
energy_max=energy_max,
energy_step=energy_step,
folder=folder,
baseline=baseline)
calibration.experiment.norm_to(norm_to_file)
calibration.experiment.slice(start=image_start, end=image_end)
# calibration.experiment.plot(
# x_type='energy',
# source_to_detector_m=source_to_detector_m,
# offset_us=offset_us,
# logx=True,
# fmt='-'
# )
calibrate_result = calibration.calibrate(source_to_detector_m=source_to_detector_m,
offset_us=offset_us,
vary='none',
each_step=each_step)
calibration.index_peak(thres_exp=0.05, min_dist_exp=10, min_dist_map=5, thres_map=0.05, rel_tol=0.05)
# calibration.analyze_peak()
calibration.plot(y_type='attenuation',
# y_type='transmission',
x_type='energy',
# t_unit='ms',
# before=True,
# interp=True,
mixed=True,
# peak_exp='all',
table=False,
peak_exp='indexed',
peak_height=True,
index_level='iso',
peak_id='all',
logx=True,
)
plt.xlim(left=0, right=1000)
plt.show()
df = calibration.export(y_type='attenuation',
# y_type='transmission',
x_type='energy',
# t_unit='ms',
# before=True,
# interp=True,
# mixed=True,
index_level='iso',
peak_id='all')
# # Fit the peak height
# fit = FitResonance(folder=folder,
# spectra_file=spectra_file,
# data_file=data_file,
# repeat=repeat,
# energy_min=energy_min,
# energy_max=energy_max,
# energy_step=energy_step,
# calibrated_offset_us=calibration.calibrated_offset_us,
# calibrated_source_to_detector_m=calibration.calibrated_source_to_detector_m,
# norm_to_file=norm_to_file,
# slice_start=image_start,
# slice_end=image_end,
# baseline=baseline)
# fit_result = fit.fit(layer, vary='thickness', each_step=each_step)
# fit.molar_conc()
# fit.plot()
#
| bsd-3-clause |
IndraVikas/scikit-learn | sklearn/utils/testing.py | 84 | 24860 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Calable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Calable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
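# Example usage (illustrative):
#   assert_warns_message(UserWarning, "deprecated",
#                        lambda: warnings.warn("this path is deprecated",
#                                              UserWarning))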
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
Name of the estimator
func : callable
Calable object to raise error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion, GridSearchCV and RandomizedSearchCV.
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
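# Illustrative sketch (not part of the original module): typical use of
# `all_estimators`, e.g. instantiating every default-constructible
# classifier found in the installed sklearn.
#
#   for name, Estimator in all_estimators(type_filter='classifier'):
#       est = Estimator()
#       set_random_state(est)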
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
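# Illustrative sketch (not part of the original module): the decorator simply
# wraps a plotting test so it is skipped when matplotlib is unavailable.
#
#   @if_matplotlib
#   def test_some_plot():
#       import matplotlib.pyplot as plt
#       plt.figure()
#       plt.plot([0, 1], [0, 1])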
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
    Copy from joblib.pool (for independence)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
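# Illustrative sketch (not part of the original module): `TempMemmap` round-
# trips an array through joblib so the `with` body sees a read-only memory
# map backed by a temporary folder that is cleaned up afterwards.
#
#   import numpy as np
#   X = np.random.RandomState(0).rand(10, 3)
#   with TempMemmap(X) as X_readonly:
#       assert not X_readonly.flags['WRITEABLE']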
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
NumCosmo/NumCosmo | examples/example_hiprim_Tmodes.py | 1 | 6080 | #!/usr/bin/env python
try:
import gi
gi.require_version('NumCosmo', '1.0')
gi.require_version('NumCosmoMath', '1.0')
except:
pass
import math
import numpy as np
import matplotlib.pyplot as plt
from gi.repository import GObject
from gi.repository import NumCosmo as Nc
from gi.repository import NumCosmoMath as Ncm
#
# Initializing the library objects, this must be called before
# any other library function.
#
Ncm.cfg_init ()
#
# Script parameters
#
# Maximum multipole
lmax = 2500
#
# Creating a new instance of HIPrimPowerLaw
#
prim = Nc.HIPrimPowerLaw.new ()
r = 1.0
prim.props.T_SA_ratio = r
prim.props.n_T = -1.0 * r / 8.0
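# n_T = -r/8 is the standard single-field slow-roll consistency relation
# between the tensor spectral index and the tensor-to-scalar ratio.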
#
# New CLASS backend precision object
# Let's also increase k_per_decade_primordial since we are
# dealing with a modified spectrum.
#
cbe_prec = Nc.CBEPrecision.new ()
cbe_prec.props.k_per_decade_primordial = 50.0
#
# New CLASS backend object
#
cbe = Nc.CBE.prec_new (cbe_prec)
#
# New CLASS backend Boltzmann object
#
Bcbe = Nc.HIPertBoltzmannCBE.full_new (cbe)
Bcbe.set_TT_lmax (lmax)
# Setting which CMB data to use
Bcbe.set_target_Cls (Nc.DataCMBDataType.TT)
# Setting if the lensed Cl's are going to be used or not.
Bcbe.set_lensed_Cls (True)
# Setting if the tensor contribution is going to be used or not.
Bcbe.set_tensor (True)
Bcbe.append_target_Cls (Nc.DataCMBDataType.TT)
Bcbe.append_target_Cls (Nc.DataCMBDataType.TE)
Bcbe.append_target_Cls (Nc.DataCMBDataType.EE)
Bcbe.append_target_Cls (Nc.DataCMBDataType.BB)
Bcbe.set_TT_lmax (lmax)
Bcbe.set_TE_lmax (lmax)
Bcbe.set_EE_lmax (lmax)
Bcbe.set_BB_lmax (30)
#
# New homogeneous and isotropic cosmological model NcHICosmoDEXcdm
#
cosmo = Nc.HICosmo.new_from_name (Nc.HICosmo, "NcHICosmoDEXcdm")
cosmo.omega_x2omega_k ()
cosmo.param_set_by_name ("Omegak", 0.0)
#
# New homogeneous and isotropic reionization object
#
reion = Nc.HIReionCamb.new ()
#
# Adding submodels to the main cosmological model.
#
cosmo.add_submodel (reion)
cosmo.add_submodel (prim)
#
# Preparing the Class backend object
#
fact = 1.0e200
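# Note (added comment): the rescaling below effectively isolates the tensor
# contribution -- the scalar amplitude is suppressed by `fact` while the
# tensor-to-scalar ratio is boosted by the same factor, leaving the tensor
# amplitude unchanged. The first set of Cl's is therefore tensor-only; after
# restoring ln10e10ASA and setting T_SA_ratio = 1/fact, the second set is
# scalar-only (matching the 'tensor'/'scalar' labels in the plots below).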
ln10e10ASA = prim.props.ln10e10ASA
T_SA_ratio = prim.props.T_SA_ratio
prim.props.ln10e10ASA = prim.props.ln10e10ASA + math.log (1.0 / fact)
prim.props.T_SA_ratio = prim.props.T_SA_ratio * fact
Bcbe.prepare (cosmo)
Cls1_TT = Ncm.Vector.new (lmax + 1)
Cls2_TT = Ncm.Vector.new (lmax + 1)
Cls1_TE = Ncm.Vector.new (lmax + 1)
Cls2_TE = Ncm.Vector.new (lmax + 1)
Cls1_EE = Ncm.Vector.new (lmax + 1)
Cls2_EE = Ncm.Vector.new (lmax + 1)
Cls1_BB = Ncm.Vector.new (31)
Cls2_BB = Ncm.Vector.new (31)
Bcbe.get_TT_Cls (Cls1_TT)
Bcbe.get_TE_Cls (Cls1_TE)
Bcbe.get_EE_Cls (Cls1_EE)
Bcbe.get_BB_Cls (Cls1_BB)
prim.props.ln10e10ASA = ln10e10ASA
prim.props.T_SA_ratio = T_SA_ratio
prim.props.T_SA_ratio = 1.0 / fact
Bcbe.prepare (cosmo)
Bcbe.get_TT_Cls (Cls2_TT)
Bcbe.get_TE_Cls (Cls2_TE)
Bcbe.get_EE_Cls (Cls2_EE)
Bcbe.get_BB_Cls (Cls2_BB)
Cls1_TT_a = Cls1_TT.dup_array ()
Cls1_TE_a = Cls1_TE.dup_array ()
Cls1_EE_a = Cls1_EE.dup_array ()
Cls1_BB_a = Cls1_BB.dup_array ()
Cls2_TT_a = Cls2_TT.dup_array ()
Cls2_TE_a = Cls2_TE.dup_array ()
Cls2_EE_a = Cls2_EE.dup_array ()
Cls2_BB_a = Cls2_BB.dup_array ()
ell = np.array (list(range(2, lmax + 1)))
ell_BB = np.array (list(range(2, 31)))
Cls1_TT_a = ell * (ell + 1.0) * np.array (Cls1_TT_a[2:])
Cls1_TE_a = ell * (ell + 1.0) * np.array (Cls1_TE_a[2:])
Cls1_EE_a = ell * (ell + 1.0) * np.array (Cls1_EE_a[2:])
Cls1_BB_a = ell_BB * (ell_BB + 1.0) * np.array (Cls1_BB_a[2:])
Cls2_TT_a = ell * (ell + 1.0) * np.array (Cls2_TT_a[2:])
Cls2_TE_a = ell * (ell + 1.0) * np.array (Cls2_TE_a[2:])
Cls2_EE_a = ell * (ell + 1.0) * np.array (Cls2_EE_a[2:])
Cls2_BB_a = ell_BB * (ell_BB + 1.0) * np.array (Cls2_BB_a[2:])
print ("TT")
print (Cls1_TT_a[:28])
print (Cls2_TT_a[:28])
print ("TE")
print (Cls1_TE_a[:28])
print (Cls2_TE_a[:28])
print ("EE")
print (Cls1_EE_a[:28])
print (Cls2_EE_a[:28])
print ("BB")
print (Cls1_BB_a[:28])
print (Cls2_BB_a[:28])
#
# Plotting the TT angular power spectrum
#
plt.title (r'With and without tensor contribution to $C_\ell^\mathrm{TT}$')
plt.plot (ell[:28], Cls1_TT_a[:28], 'r', label="tensor")
plt.plot (ell[:28], Cls2_TT_a[:28], 'b--', label="scalar")
plt.xlabel(r'$\ell$')
plt.ylabel(r'$C_\ell$')
plt.legend(loc='best')
#plt.xscale ('log')
#plt.yscale ('symlog')
plt.savefig ("hiprim_tensor_Cls_TT.pdf")
plt.clf ()
plt.title (r'With and without tensor contribution to $C_\ell^\mathrm{TE}$')
plt.plot (ell[:28], Cls1_TE_a[:28], 'r', label="tensor")
plt.plot (ell[:28], Cls2_TE_a[:28], 'b--', label="scalar")
plt.xlabel(r'$\ell$')
plt.ylabel(r'$C_\ell$')
plt.legend(loc='best')
#plt.xscale ('log')
#plt.yscale ('symlog')
plt.savefig ("hiprim_tensor_Cls_TE.pdf")
plt.clf ()
plt.title (r'With and without tensor contribution to $C_\ell^\mathrm{EE}$')
plt.plot (ell[:28], Cls1_EE_a[:28], 'r', label="tensor")
plt.plot (ell[:28], Cls2_EE_a[:28], 'b--', label="scalar")
plt.xlabel(r'$\ell$')
plt.ylabel(r'$C_\ell$')
plt.legend(loc='best')
#plt.xscale ('log')
#plt.yscale ('symlog')
plt.savefig ("hiprim_tensor_Cls_EE.pdf")
plt.clf ()
plt.title (r'With and without tensor contribution to $C_\ell^\mathrm{BB}$')
plt.plot (ell_BB[:28], Cls1_BB_a[:28], 'r', label="tensor")
plt.plot (ell_BB[:28], Cls2_BB_a[:28], 'b--', label="scalar")
plt.xlabel(r'$\ell$')
plt.ylabel(r'$C_\ell$')
plt.legend(loc='best')
#plt.xscale ('log')
#plt.yscale ('symlog')
plt.savefig ("hiprim_tensor_Cls_BB.pdf")
plt.clf ()
theta = np.linspace (math.pi / 3.0, math.pi, 1000)
smap = Ncm.SphereMap.new (128)
smap.set_lmax (lmax)
smap.set_Cls (Cls1_TT)
s = smap.calc_Ctheta (1.0e-7)
Ctheta1 = np.vectorize (s.eval) (theta)
smap.set_Cls (Cls2_TT)
s = smap.calc_Ctheta (1.0e-7)
Ctheta2 = np.vectorize (s.eval) (theta)
plt.title (r'$C^\mathrm{TT}(\theta)$')
plt.plot (theta, Ctheta1, 'r', lw=0.1, label=r'$C(\theta)^\mathrm{tensor}$')
plt.plot (theta, Ctheta2, 'b', lw=0.1, label=r'$C(\theta)^\mathrm{scalar}$')
plt.xlabel(r'$\theta$')
plt.ylabel(r'$C(\theta)$')
plt.legend(loc='best')
#plt.xscale ('log')
#plt.yscale ('symlog')
plt.savefig ("hiprim_tensor_Ctheta_TT.pdf")
plt.clf ()
| gpl-3.0 |
ifarup/tc1-82 | ciefunctions.py | 2 | 36878 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ciefunctions: GUI application for the calculation of the CIE
cone-fundamental-based colorimetric functions provided
by CIE TC 1-97.
Copyright (C) 2012-2020 Ivar Farup and Jan Henrik Wold
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import tc1_97
import tc1_97.description
import tc1_97.plot
import tc1_97.table
from tc1_97.utils import resource_path
import sys
import os
import numpy as np
from PyQt5 import QtWidgets, QtGui, QtCore, QtWebEngineWidgets
from matplotlib.backends.backend_qt5agg \
import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg \
import NavigationToolbar2QT \
as NavigationToolbar
from matplotlib.figure import Figure
class AppForm(QtWidgets.QMainWindow):
"""
The main application window.
"""
def __init__(self, parent=None):
QtWidgets.QMainWindow.__init__(self, parent)
QtWidgets.QWidget.move(self, 0, 0)
self.setWindowTitle(' CIE Functions')
self.create_menu()
self.create_main_frame()
self.on_compute()
def save_table(self):
file_choices = "CSV (*.csv)|*.csv"
pre = ''
post = ''
if self.plot_combo.currentIndex() == self.COMBO_LMS:
pre = 'cie2006_lms'
elif self.plot_combo.currentIndex() == self.COMBO_LMSBASE:
pre = 'cie2006_lms_9figs'
elif self.plot_combo.currentIndex() == self.COMBO_MB:
pre = 'macleod_boynton_cc'
elif self.plot_combo.currentIndex() == self.COMBO_LM:
pre = 'maxwellian_cc'
elif self.plot_combo.currentIndex() == self.COMBO_XYZ:
pre = 'cie_xyz_F'
elif self.plot_combo.currentIndex() == self.COMBO_XY:
pre = 'cie_cc_F'
elif self.plot_combo.currentIndex() == self.COMBO_PURPLE_XYZ:
pre = 'purple_xyz_F'
elif self.plot_combo.currentIndex() == self.COMBO_PURPLE_XY:
pre = 'purple_cc_F'
if ((self.plot_combo.currentIndex() in [self.COMBO_LMS,
self.COMBO_LMSBASE]) and
self.log_check.isChecked()):
post = '__logarithmic_values'
elif (self.plot_combo.currentIndex() == self.COMBO_LM or
((self.plot_combo.currentIndex() in [self.COMBO_XYZ,
self.COMBO_XY,
self.COMBO_PURPLE_XYZ,
self.COMBO_PURPLE_XY]) and
self.norm_check.isChecked())):
post = '__renormalized_values'
if ((self.plot_combo.currentIndex() == self.COMBO_XYZSTD) and
(self.field_combo.currentIndex() == self.STD_31)):
suggest = 'cie_xyz__standard1931__fs_2.csv'
elif (self.plot_combo.currentIndex() == self.COMBO_XYZSTD and
self.field_combo.currentIndex() == self.STD_64):
suggest = 'cie_xyz__standard1964__fs_10.csv'
elif (self.plot_combo.currentIndex() == self.COMBO_XYSTD and
self.field_combo.currentIndex() == self.STD_31):
suggest = 'cie_cc__standard1931__fs_2.csv'
elif (self.plot_combo.currentIndex() == self.COMBO_XYSTD and
self.field_combo.currentIndex() == self.STD_64):
suggest = 'cie_cc__standard1964__fs_10.csv'
else:
suggest = (pre + '__fs_' + str(self.field_spin.value()) +
'__age_' + str(self.age_spin.value()) +
'__domain_' + str(self.lambda_min_spin.value()) +
'-' + str(self.lambda_max_spin.value()) +
'__step_' + str(self.resolution_spin.value()) +
post + '.csv')
path = QtWidgets.QFileDialog.getSaveFileName(self,
'Save file', suggest,
file_choices)[0]
if path:
if self.plot_combo.currentIndex() == self.COMBO_LMS:
if self.log_check.isChecked():
np.savetxt(path, self.results['logLMS'],
'%.1f, %.5f, %.5f, %.5f')
else:
np.savetxt(path, self.results['LMS'],
'%.1f, %.5e, %.5e, %.5e')
elif self.plot_combo.currentIndex() == self.COMBO_LMSBASE:
if self.log_check.isChecked():
np.savetxt(path, self.results['logLMS_base'],
'%.1f, %.8f, %.8f, %.8f')
else:
np.savetxt(path, self.results['LMS_base'],
'%.1f, %.8e, %.8e, %.8e')
elif self.plot_combo.currentIndex() == self.COMBO_MB:
np.savetxt(path, self.results['lms_mb'],
'%.1f, %.6f, %.6f, %.6f')
elif self.plot_combo.currentIndex() == self.COMBO_LM:
np.savetxt(path, self.results['lms_mw'],
'%.1f, %.6f, %.6f, %.6f')
elif self.plot_combo.currentIndex() == self.COMBO_XYZ:
if self.norm_check.isChecked():
np.savetxt(path, self.results['XYZ_N'],
'%.1f, %.6e, %.6e, %.6e')
else:
np.savetxt(path, self.results['XYZ'],
'%.1f, %.6e, %.6e, %.6e')
elif self.plot_combo.currentIndex() == self.COMBO_XY:
if self.norm_check.isChecked():
np.savetxt(path, self.results['xyz_N'],
'%.1f, %.5f, %.5f, %.5f')
else:
np.savetxt(path, self.results['xyz'],
'%.1f, %.5f, %.5f, %.5f')
elif self.plot_combo.currentIndex() == self.COMBO_PURPLE_XYZ:
if self.norm_check.isChecked():
np.savetxt(path, self.results['XYZ_purples_N'],
'%.1f, %.6e, %.6e, %.6e')
else:
np.savetxt(path, self.results['XYZ_purple'],
'%.1f, %.6e, %.6e, %.6e')
elif self.plot_combo.currentIndex() == self.COMBO_PURPLE_XY:
if self.norm_check.isChecked():
np.savetxt(path, self.results['xyz_purples_N'],
'%.1f, %.5f, %.5f, %.5f')
else:
np.savetxt(path, self.results['xyz_purples'],
'%.1f, %.5f, %.5f, %.5f')
elif self.plot_combo.currentIndex() == self.COMBO_XYZSTD:
if self.field_combo.currentIndex() == self.STD_31:
np.savetxt(path, self.results['XYZ31'],
'%.1f, %.6e, %.6e, %.6e')
else:
np.savetxt(path, self.results['XYZ64'],
'%.1f, %.6e, %.6e, %.6e')
elif self.plot_combo.currentIndex() == self.COMBO_XYSTD:
if self.field_combo.currentIndex() == self.STD_31:
np.savetxt(path, self.results['xyz31'],
'%.1f, %.5f, %.5f, %.5f')
else:
np.savetxt(path, self.results['xyz64'],
'%.1f, %.5f, %.5f, %.5f')
def options(self):
"""
Return a dict() with the current plot options for the plot module.
"""
return {'grid': self.grid_check.isChecked(),
'cie31': self.cie31_check.isChecked(),
'cie64': self.cie64_check.isChecked(),
'labels': self.wavelength_check.isChecked(),
'log10': self.log_check.isChecked(),
'norm': self.norm_check.isChecked(),
'label_fontsize': 7,
'title_fontsize': 10.5,
'full_title': True,
'axis_labels': True}
def on_about(self):
msg = """
CIE Functions: Calculates the CIE cone-fundamental-based colorimetric \
functions according to CIE TC 1-97.
Copyright (C) 2012-2020 Ivar Farup and Jan Henrik Wold
This program is free software: you can redistribute it and/or modify it \
under the terms of the GNU General Public License as published by the \
Free Software Foundation, either version 3 of the License, or (at your \
option) any later version.
This program is distributed in the hope that it will be useful, but \
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY \
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License \
for more details.
You should have received a copy of the GNU General Public License along \
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
QtWidgets.QMessageBox.about(self, "About CIE Functions", msg.strip())
def on_grid(self):
self.axes.grid(self.grid_check.isChecked())
self.canvas.draw()
def on_draw(self, redraw_description=True):
# Reset GUI values that have not been computed
self.field_spin.setValue(self.last_field)
self.age_spin.setValue(self.last_age)
self.lambda_min_spin.setValue(self.last_lambda_min)
self.lambda_max_spin.setValue(self.last_lambda_max)
self.resolution_spin.setValue(self.last_resolution)
self.mpl_toolbar.update() # reset the views history (fixes #124)
if self.plot_combo.currentIndex() not in \
[self.COMBO_XYSTD, self.COMBO_XYZSTD]:
self.field_spin.show()
self.field_combo.hide()
self.age_label.show()
self.age_spin.show()
self.resolution_label.show()
self.resolution_spin.show()
self.compute_button.show()
self.lambda_min_max_label.show()
self.lambda_min_max_dash.show()
self.lambda_min_spin.show()
self.lambda_max_spin.show()
else:
self.field_spin.hide()
self.field_combo.show()
self.age_label.hide()
self.age_spin.hide()
self.resolution_label.hide()
self.resolution_spin.hide()
self.compute_button.hide()
self.lambda_min_max_label.hide()
self.lambda_min_max_dash.hide()
self.lambda_min_spin.hide()
self.lambda_max_spin.hide()
if self.plot_combo.currentIndex() in [self.COMBO_LMS,
self.COMBO_LMSBASE]:
self.norm_label.setVisible(False)
self.norm_check.setVisible(False)
self.log_label.setVisible(True)
self.log_check.setVisible(True)
elif self.plot_combo.currentIndex() == self.COMBO_LM:
self.log_label.setVisible(False)
self.log_check.setVisible(False)
self.norm_check.setVisible(False)
self.norm_label.setVisible(True)
elif self.plot_combo.currentIndex() in [self.COMBO_XYZ,
self.COMBO_XY,
self.COMBO_PURPLE_XYZ,
self.COMBO_PURPLE_XY]:
self.log_label.setVisible(False)
self.log_check.setVisible(False)
self.norm_label.setVisible(True)
self.norm_check.setVisible(True)
else:
self.log_label.setVisible(False)
self.log_check.setVisible(False)
self.norm_label.setVisible(False)
self.norm_check.setVisible(False)
#
# CIE LMS cone fundamentals
# (description, plot and table)
#
if self.plot_combo.currentIndex() == self.COMBO_LMS:
# Setup GUI
self.compare_label_31.setDisabled(True)
self.compare_label_64.setDisabled(True)
self.wavelength_check.setDisabled(True)
self.wavelength_label.setDisabled(True)
self.cie31_check.setDisabled(True)
self.cie64_check.setDisabled(True)
# Create html description
html_string = tc1_97.description.LMS(self.results,
self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.LMS(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.LMS(self.results, self.options(), True)
#
# CIE LMS cone fundamentals (9 sign. figs.)
# (description, plot and table)
#
elif self.plot_combo.currentIndex() == self.COMBO_LMSBASE:
# Setup GUI
self.compare_label_31.setDisabled(True)
self.compare_label_64.setDisabled(True)
self.wavelength_check.setDisabled(True)
self.wavelength_label.setDisabled(True)
self.cie31_check.setDisabled(True)
self.cie64_check.setDisabled(True)
# Create html description
html_string = tc1_97.description.LMS_base(
self.results,
self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.LMS_base(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.LMS_base(self.results,
self.options(), True)
#
# MacLeod-Boynton ls chromaticity diagram
# (description, plot and table)
#
elif self.plot_combo.currentIndex() == self.COMBO_MB:
# Setup GUI
self.compare_label_31.setDisabled(True)
self.compare_label_64.setDisabled(True)
self.wavelength_check.setEnabled(True)
self.wavelength_label.setEnabled(True)
self.cie31_check.setDisabled(True)
self.cie64_check.setDisabled(True)
# Create html description
html_string = tc1_97.description.lms_mb(
self.results,
self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.ls_mb(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.lms_mb(self.results,
self.options(), True)
#
# Maxwellian lm chromaticity diagram
# (description, plot and table)
#
elif self.plot_combo.currentIndex() == self.COMBO_LM:
# Setup GUI
self.compare_label_31.setDisabled(True)
self.compare_label_64.setDisabled(True)
self.wavelength_check.setEnabled(True)
self.wavelength_label.setEnabled(True)
self.cie31_check.setDisabled(True)
self.cie64_check.setDisabled(True)
# Create html description
html_string = tc1_97.description.lms_mw(
self.results, self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.lm_mw(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.lms_mw(
self.results, self.options(), True)
#
# CIE XYZ cone-fundamental-based tristimulus functions
# (description, plot and table)
#
elif self.plot_combo.currentIndex() == self.COMBO_XYZ:
# Setup GUI
self.compare_label_31.setEnabled(True)
self.compare_label_64.setEnabled(True)
self.wavelength_check.setDisabled(True)
self.wavelength_label.setDisabled(True)
self.cie31_check.setEnabled(True)
self.cie64_check.setEnabled(True)
# Create html description
html_string = tc1_97.description.XYZ(self.results,
self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.XYZ(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.XYZ(self.results, self.options(), True)
#
# CIE xy cone-fundamental-based chromaticity diagram
# (description, plot and table)
#
elif self.plot_combo.currentIndex() == self.COMBO_XY:
# Setup GUI
self.compare_label_31.setEnabled(True)
self.compare_label_64.setEnabled(True)
self.wavelength_check.setEnabled(True)
self.wavelength_label.setEnabled(True)
self.cie31_check.setEnabled(True)
self.cie64_check.setEnabled(True)
# Create html description
html_string = tc1_97.description.xyz(self.results,
self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.xy(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.xyz(self.results, self.options(), True)
#
# XYZ cone-fundamental-based tristimulus functions for
# purple-line stimuli (description, plot and table)
#
elif self.plot_combo.currentIndex() == self.COMBO_PURPLE_XYZ:
# Setup GUI
self.compare_label_31.setEnabled(False)
self.compare_label_64.setEnabled(False)
self.wavelength_check.setDisabled(True)
self.wavelength_label.setDisabled(True)
self.cie31_check.setEnabled(False)
self.cie64_check.setEnabled(False)
# Create html descriptions
html_string = tc1_97.description.XYZ_purples(
self.results,
self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.XYZ_purples(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.XYZ_purples(self.results,
self.options(), True)
#
# xy cone-fundamental-based chromaticity diagram (purple-line stimuli)
# (description, plot and table)
#
elif self.plot_combo.currentIndex() == self.COMBO_PURPLE_XY:
# Setup GUI
self.compare_label_31.setEnabled(False)
self.compare_label_64.setEnabled(False)
self.wavelength_check.setEnabled(True)
self.wavelength_label.setEnabled(True)
self.cie31_check.setEnabled(False)
self.cie64_check.setEnabled(False)
# Create html description
html_string = tc1_97.description.xyz_purples(
self.results,
self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.xy_purples(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.xyz_purples(self.results,
self.options(), True)
#
# CIE XYZ standard colour-matching functions
# (description, plot and table)
#
elif self.plot_combo.currentIndex() == self.COMBO_XYZSTD:
# Setup GUI
self.wavelength_check.setDisabled(True)
self.wavelength_label.setDisabled(True)
if self.field_combo.currentIndex() == self.STD_31:
# Setup GUI
self.compare_label_31.setDisabled(True)
self.compare_label_64.setEnabled(True)
self.cie31_check.setDisabled(True)
self.cie64_check.setEnabled(True)
# Create html description
html_string = tc1_97.description.XYZ31(
self.results,
self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.XYZ31(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.XYZ31(self.results,
self.options(), True)
else: # STD_64
# Setup GUI
self.compare_label_31.setEnabled(True)
self.compare_label_64.setDisabled(True)
self.cie31_check.setEnabled(True)
self.cie64_check.setDisabled(True)
# Create html description
html_string = tc1_97.description.XYZ64(
self.results, self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.XYZ64(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.XYZ64(self.results,
self.options(), True)
#
# CIE xy standard chromaticity diagrams
# (description, plot and table)
#
elif self.plot_combo.currentIndex() == self.COMBO_XYSTD:
# Setup GUI
self.wavelength_check.setEnabled(True)
self.wavelength_label.setEnabled(True)
if self.field_combo.currentIndex() == self.STD_31:
# Setup GUI
self.compare_label_31.setDisabled(True)
self.compare_label_64.setEnabled(True)
self.cie31_check.setDisabled(True)
self.cie64_check.setEnabled(True)
# Create html description
html_string = tc1_97.description.xyz31(
self.results, self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.xy31(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.xyz31(self.results,
self.options(), True)
else: # STD_64
# Setup GUI
self.compare_label_31.setEnabled(True)
self.compare_label_64.setDisabled(True)
self.cie31_check.setEnabled(True)
self.cie64_check.setDisabled(True)
# Create html description
html_string = tc1_97.description.xyz64(
self.results, self.plot_combo.currentText(),
self.options(), True)
# Create plot
tc1_97.plot.xy64(self.axes, self.plots, self.options())
# Create html table
html_table = tc1_97.table.xyz64(self.results,
self.options(), True)
#
# Refresh GUI
#
base_url = QtCore.QUrl.fromLocalFile(os.getcwd() + os.sep)
if redraw_description:
self.transformation.setHtml(html_string, baseUrl=base_url)
self.html_table.setHtml(html_table, baseUrl=base_url)
self.canvas.draw()
return
def on_draw_plot_only(self):
self.on_draw(False)
def on_draw_all(self):
self.on_draw(True)
def on_compute(self):
if self.lambda_max_spin.value() < 700:
self.lambda_max_spin.setValue(700)
self.lambda_max_spin.setMinimum(700)
self.last_age = self.age_spin.value()
self.last_field = self.field_spin.value()
self.last_resolution = tc1_97.my_round(self.resolution_spin.value(), 1)
self.last_lambda_min = tc1_97.my_round(self.lambda_min_spin.value(), 1)
self.last_lambda_max = tc1_97.my_round(self.lambda_max_spin.value(), 1)
self.results, self.plots = tc1_97.compute_tabulated(
self.last_field,
self.last_age,
self.last_lambda_min,
self.last_lambda_max,
self.last_resolution)
if self.results['XYZ'][-1, 0] < 700:
self.lambda_max_spin.setMinimum(self.results['XYZ'][-1, 0])
if self.results['XYZ'][-1, 0] != self.last_lambda_max:
self.last_lambda_max = self.results['XYZ'][-1, 0]
self.lambda_max_spin.setValue(self.last_lambda_max)
self.on_draw(True)
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action(self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QtWidgets.QAction(text, self)
if icon is not None:
action.setIcon(QtGui.QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, QtCore.SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
quit_action = self.file_menu.addAction("&Quit")
quit_action.triggered.connect(self.close)
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.help_menu.addAction("&About")
about_action.triggered.connect(self.on_about)
def create_main_frame(self):
self.main_frame = QtWidgets.QWidget()
# Create the mpl Figure and FigCanvas objects.
        # 10x8 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((10.0, 8.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111)
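        # e.g. the add_axes alternative mentioned above would be
        # self.axes = self.fig.add_axes([0.1, 0.1, 0.8, 0.8])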
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
# Other GUI controls
#
self.field_spin = QtWidgets.QDoubleSpinBox()
self.field_spin.setLocale(QtCore.QLocale('C'))
self.field_spin.setMinimum(1)
self.field_spin.setMaximum(10)
self.field_spin.setDecimals(1)
self.field_spin.setValue(2)
self.field_spin.setSingleStep(0.1)
self.field_combo = QtWidgets.QComboBox()
self.field_combo.addItem(u'2\N{DEGREE SIGN} (1931)')
self.STD_31 = 0
self.field_combo.addItem(u'10\N{DEGREE SIGN} (1964)')
self.STD_64 = 1
self.field_combo.hide()
self.field_combo.currentIndexChanged.connect(self.on_draw_all)
self.age_spin = QtWidgets.QSpinBox()
self.age_spin.setMinimum(20)
self.age_spin.setMaximum(80)
self.age_spin.setValue(32)
self.lambda_min_spin = QtWidgets.QDoubleSpinBox()
self.lambda_min_spin.setLocale(QtCore.QLocale('C'))
self.lambda_min_spin.setMinimum(390)
self.lambda_min_spin.setMaximum(400)
self.lambda_min_spin.setDecimals(1)
self.lambda_min_spin.setValue(390)
self.lambda_min_spin.setSingleStep(0.1)
self.lambda_max_spin = QtWidgets.QDoubleSpinBox()
self.lambda_max_spin.setLocale(QtCore.QLocale('C'))
self.lambda_max_spin.setMinimum(700)
self.lambda_max_spin.setMaximum(830)
self.lambda_max_spin.setDecimals(1)
self.lambda_max_spin.setValue(830)
self.lambda_max_spin.setSingleStep(0.1)
self.resolution_spin = QtWidgets.QDoubleSpinBox()
self.resolution_spin.setLocale(QtCore.QLocale('C'))
self.resolution_spin.setMinimum(0.1)
self.resolution_spin.setMaximum(5)
self.resolution_spin.setDecimals(1)
self.resolution_spin.setValue(1)
self.resolution_spin.setSingleStep(0.1)
self.plot_combo = QtWidgets.QComboBox()
self.plot_combo.addItem('CIE LMS cone fundamentals')
self.COMBO_LMS = 0
self.plot_combo.addItem('CIE LMS cone fundamentals (9 sign. figs.)')
self.COMBO_LMSBASE = 1
self.plot_combo.addItem(
u'MacLeod\u2013Boynton ls chromaticity diagram')
self.COMBO_MB = 2
self.plot_combo.addItem('Maxwellian lm chromaticity diagram')
self.COMBO_LM = 3
self.plot_combo.addItem(
'CIE XYZ cone-fundamental-based tristimulus functions')
self.COMBO_XYZ = 4
self.plot_combo.addItem(
'CIE xy cone-fundamental-based chromaticity diagram')
self.COMBO_XY = 5
self.plot_combo.addItem(
'XYZ cone-fundamental-based tristimulus functions for ' +
'purple-line stimuli')
self.COMBO_PURPLE_XYZ = 6
self.plot_combo.addItem(
'xy cone-fundamental-based chromaticity diagram ' +
'(purple-line stimuli)')
self.COMBO_PURPLE_XY = 7
self.plot_combo.addItem('CIE XYZ standard colour-matching functions')
self.COMBO_XYZSTD = 8
self.plot_combo.addItem('CIE xy standard chromaticity diagram')
self.COMBO_XYSTD = 9
self.plot_combo.currentIndexChanged.connect(self.on_draw_all)
self.grid_check = QtWidgets.QCheckBox()
self.grid_check.stateChanged.connect(self.on_grid)
self.wavelength_check = QtWidgets.QCheckBox()
self.wavelength_check.stateChanged.connect(self.on_draw_plot_only)
self.cie31_check = QtWidgets.QCheckBox()
self.cie31_check.stateChanged.connect(self.on_draw_plot_only)
self.cie64_check = QtWidgets.QCheckBox()
self.cie64_check.stateChanged.connect(self.on_draw_plot_only)
self.norm_check = QtWidgets.QCheckBox()
self.norm_check.stateChanged.connect(self.on_draw_all)
self.log_check = QtWidgets.QCheckBox()
self.log_check.stateChanged.connect(self.on_draw_all)
self.save_table_button = QtWidgets.QPushButton('&Save table')
self.save_table_button.clicked.connect(self.save_table)
self.compute_button = QtWidgets.QPushButton(' &Compute ')
self.compute_button.clicked.connect(self.on_compute)
self.transformation = QtWebEngineWidgets.QWebEngineView()
self.html_table = QtWebEngineWidgets.QWebEngineView()
# Layout with labels
#
self.compare_label_31 = QtWidgets.QLabel(
u'Compare with CIE 1931 2\N{DEGREE SIGN}')
self.compare_label_64 = QtWidgets.QLabel(
u'Compare with CIE 1964 10\N{DEGREE SIGN}')
self.grid_label = QtWidgets.QLabel('Grid')
self.wavelength_label = QtWidgets.QLabel('Labels')
self.norm_label = QtWidgets.QLabel(' Renormalized values ')
self.log_label = QtWidgets.QLabel(' Logarithmic values ')
self.field_label = QtWidgets.QLabel(u' Field size (\N{DEGREE SIGN})')
self.age_label = QtWidgets.QLabel(' Age (yr)')
self.lambda_min_max_label = QtWidgets.QLabel(' Domain (nm)')
self.lambda_min_max_dash = QtWidgets.QLabel(u'\u2013')
self.resolution_label = QtWidgets.QLabel(' Step (nm)')
grid = QtWidgets.QGridLayout()
grid.setRowMinimumHeight(0, 34)
grid.addWidget(self.field_label, 0, 0, QtCore.Qt.AlignRight)
grid.addWidget(self.age_label, 0, 2, QtCore.Qt.AlignRight)
grid.addWidget(self.lambda_min_max_label, 0, 4, QtCore.Qt.AlignRight)
grid.addWidget(self.lambda_min_max_dash, 0, 6)
grid.addWidget(self.resolution_label, 0, 8, QtCore.Qt.AlignRight)
grid.addWidget(QtWidgets.QLabel(''), 0, 10)
grid.addWidget(self.field_spin, 0, 1)
grid.addWidget(self.field_combo, 0, 1)
grid.addWidget(self.age_spin, 0, 3)
grid.addWidget(self.lambda_min_spin, 0, 5)
grid.addWidget(self.lambda_max_spin, 0, 7)
grid.addWidget(self.resolution_spin, 0, 9)
grid.addWidget(self.compute_button, 0, 11)
grid.setColumnStretch(2, 11)
grid.setColumnStretch(4, 16)
grid.setColumnStretch(8, 13)
grid.setColumnStretch(10, 8)
inner_vbox = QtWidgets.QVBoxLayout()
inner_vbox.addWidget(self.mpl_toolbar)
inner_vbox.addWidget(self.canvas)
check_bar = QtWidgets.QGridLayout()
check_bar.addWidget(self.compare_label_31, 0, 0, QtCore.Qt.AlignRight)
check_bar.addWidget(self.cie31_check, 0, 1)
check_bar.addWidget(QtWidgets.QLabel(''), 0, 2)
check_bar.addWidget(self.compare_label_64, 0, 3, QtCore.Qt.AlignRight)
check_bar.addWidget(self.cie64_check, 0, 4)
check_bar.addWidget(self.grid_label, 0, 5, QtCore.Qt.AlignRight)
check_bar.addWidget(self.grid_check, 0, 6)
check_bar.addWidget(self.wavelength_label, 0, 7, QtCore.Qt.AlignRight)
check_bar.addWidget(self.wavelength_check, 0, 8)
check_bar.setColumnStretch(2, 8)
check_bar.setColumnStretch(3, 16)
check_bar.setColumnStretch(5, 14)
check_bar.setColumnStretch(7, 16)
check_widget = QtWidgets.QWidget()
check_widget.setLayout(check_bar)
inner_vbox.addWidget(check_widget)
inner_widget = QtWidgets.QWidget()
inner_widget.setLayout(inner_vbox)
table_vbox = QtWidgets.QVBoxLayout()
table_vbox.addWidget(self.html_table)
table_vbox.addWidget(self.save_table_button)
table_widget = QtWidgets.QWidget()
table_widget.setLayout(table_vbox)
spectral_tabs = QtWidgets.QTabWidget()
spectral_tabs.addTab(inner_widget, 'Plot')
spectral_tabs.addTab(table_widget, 'Table')
combo_grid = QtWidgets.QGridLayout()
combo_grid.addWidget(self.plot_combo, 0, 0)
combo_grid.addWidget(QtWidgets.QLabel(' '), 0, 1)
combo_grid.addWidget(self.norm_label, 0, 2, QtCore.Qt.AlignRight)
combo_grid.addWidget(self.norm_check, 0, 3)
combo_grid.addWidget(self.log_label, 0, 2, QtCore.Qt.AlignRight)
combo_grid.addWidget(self.log_check, 0, 3)
combo_grid.setColumnMinimumWidth(0, 650)
combo_grid.setColumnMinimumWidth(2, 150)
combo_grid.setColumnMinimumWidth(3, 20)
combo_grid.setColumnStretch(0, 1)
combo_grid.setSpacing(0)
spectral_innerwidget = QtWidgets.QWidget()
spectral_vbox = QtWidgets.QVBoxLayout(spectral_innerwidget)
spectral_vbox.addWidget(spectral_tabs)
spectral_vbox.addLayout(combo_grid)
spectral_vbox.addLayout(grid)
spectral_splitter = QtWidgets.QSplitter()
spectral_splitter.addWidget(spectral_innerwidget)
spectral_splitter.addWidget(self.transformation)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(spectral_splitter)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def main():
"""
Run the CIE Functions application.
"""
app = QtWidgets.QApplication(sys.argv)
app_icon = QtGui.QIcon()
app_icon.addFile(resource_path('icons/ciefunctions_icon (16x16) .png'),
QtCore.QSize(16, 16))
app_icon.addFile(resource_path('icons/ciefunctions_icon (24x24) .png'),
QtCore.QSize(24, 24))
app_icon.addFile(resource_path('icons/ciefunctions_icon (32x32) .png'),
QtCore.QSize(32, 32))
app_icon.addFile(resource_path('icons/ciefunctions_icon (48x48) .png'),
QtCore.QSize(48, 48))
app_icon.addFile(resource_path('icons/ciefunctions_icon (256x256) .png'),
QtCore.QSize(256, 256))
app.setWindowIcon(app_icon)
form = AppForm()
form.show()
app.exec_()
if __name__ == '__main__':
main()
| gpl-3.0 |
neddles/Loss-Analysis | loss_analysis/loss_analysis.py | 1 | 27502 | # port "loss analysis v5.xlsx" by Ziv Hameiri to python3
import openpyxl
import numpy as np
import sys
import os
import re
from collections import OrderedDict
import matplotlib.pyplot as plt
import warnings
# modules for this package
import analysis
from scipy import constants
T = 300 # TODO: make optional input?
Vth = constants.k * T / constants.e
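# thermal voltage kT/q, approximately 25.85 mV at T = 300 K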
def waterfall(ax, y, xlabels=None):
'''
Create a waterfall plot.
    Assumes the first value is the starting point;
    all other values are made negative, creating a 'waterfall' downwards.
'''
y = abs(np.array(y))
y[1:] = -1 * y[1:]
x = np.arange(len(y))
y_bot = np.append(0, y[:-1].cumsum())
ax.bar(x, y, bottom=y_bot, align='center')
ax.set_ylim(ymin = y_bot[-1] + y[-1])
if xlabels is not None:
ax.set_xticks(np.arange(len(xlabels)))
ax.set_xticklabels(xlabels, rotation=40, ha='right')
return ax
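# Illustrative sketch (not part of the original module): stand-alone use of
# `waterfall`; the values below are made up, the labels mirror the FF_loss
# keys used further down.
#
#   fig, ax = plt.subplots()
#   waterfall(ax, [0.83, 0.02, 0.01, 0.015],
#             xlabels=['FF_0', 'FF_Rs', 'FF_Rsh', 'FF_other'])
#   plt.show()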
class Refl(object):
def __init__(self, fname):
self.load(fname)
def process(self, f_metal=None, wlbounds=(900, 1000), wljunc=600):
'''
Performs several calculations including:
- Average Reflection (AR)
- Weighted Average Reflection (WAR)
- Light lost from front surface escape
the results are loaded into attributes
'''
# xxx need upper bound for this?
self.AR = np.trapz(self.refl / 100, x=self.wl)
self.AM15G_Jph = analysis.AM15G_resample(self.wl)
i_upper = (self.wl <= 1000)
self.WAR = (np.dot(self.refl[i_upper], self.AM15G_Jph[i_upper])
/ np.sum(self.AM15G_Jph[i_upper]))
if f_metal is None:
index = (self.wl >= 400) * i_upper
refl_min = np.amin(self.refl[index])
self.f_metal = refl_min
else:
self.f_metal = f_metal
index_l = (self.wl >= wlbounds[0])
index = (self.wl <= wlbounds[1]) * index_l
        # use numpy's implementation for line fitting
popt, pcov = np.polyfit(self.wl[index], self.refl[index], 1, cov=True)
self.refl_wo_escape = np.copy(self.refl)
self.refl_wo_escape[index_l] = np.polyval(popt, self.wl[index_l])
# defined as area between 100% and the given curve, to simplify calculations
Jloss = OrderedDict()
Jloss['max_limit'] = np.sum(self.AM15G_Jph)
Jloss['metal_shading'] = np.dot(self.f_metal / 100 \
* np.ones(len(self.AM15G_Jph)),
self.AM15G_Jph)
Jloss['refl_wo_escape'] = np.dot(self.refl_wo_escape / 100 \
, self.AM15G_Jph) \
- Jloss['metal_shading']
Jloss['front_escape'] = np.dot(self.refl / 100, self.AM15G_Jph) \
- Jloss['metal_shading'] \
- Jloss['refl_wo_escape']
# this makes qe Jloss calculations easier
idx_junc = analysis.find_nearest(wljunc, self.wl)
Jloss['front_escape_blue'] = np.dot(self.refl[:idx_junc] / 100,
self.AM15G_Jph[:idx_junc])
Jloss['front_escape_red'] = np.dot(self.refl[idx_junc:] / 100,
self.AM15G_Jph[idx_junc:])
self.Jloss = Jloss
def plot(self, ax):
ax.plot(self.wl, self.refl, '-o')
ax.plot(self.wl, self.refl_wo_escape, '-o')
ax.plot(self.wl, np.ones(len(self.wl)) * self.f_metal, 'r-')
ax.set_ylabel('Reflectance [%]')
ax.grid(True)
def plot_QE(self, ax):
ax.fill_between(self.wl, 100 - self.refl,
100 - self.refl_wo_escape)
ax.legend(loc='best')
# ax.set_ylabel('Reflectance [%]')
# ax.grid(True)
def load(self, raw_data_file):
'''Loads Reflectance data in attributes'''
self.filepath = raw_data_file
self.filename = os.path.basename(raw_data_file)
data_array = np.genfromtxt(raw_data_file, usecols=(0, 1), skip_header=1,
delimiter=',').transpose()
# is this needed?
if data_array[0, 0] > data_array[0, -1]:
data_array = data_array[:, ::-1]
self.wl = data_array[0, :]
self.refl = data_array[1, :]
class QE(object):
def __init__(self, fname):
self.load(fname)
def process(self, wl, refl, refl_wo_escape, Jloss, wljunc=600):
'''
Performs several calculations from QE and Reflectance data including:
- IQE
- Leff and SRV_rear
- Current loss from each region of the device
the results are saved into attributes
'''
self.IQE = self.EQE / (100 - refl)
self.output_Basore_fit, self.plot_Basore_fit = analysis.fit_Basore(
self.wl, self.IQE)
EQE_on_eta_c = self.EQE / self.output_Basore_fit['eta_c']
idx = analysis.find_nearest(750, wl)
total_min = np.minimum((100 - refl_wo_escape), EQE_on_eta_c)
self.EQE_xxx_unnamed = np.append(100 - refl_wo_escape[:idx],
total_min[idx:])
AM15G_Jph = analysis.AM15G_resample(self.wl)
Jloss_qe = Jloss.copy()
del Jloss_qe['front_escape_red']
del Jloss_qe['front_escape_blue']
idx_junc = analysis.find_nearest(wljunc, self.wl)
Jloss_qe['parasitic_absorption'] = np.dot((100 - self.EQE_xxx_unnamed[idx_junc:]) / 100,
AM15G_Jph[idx_junc:]) \
- Jloss['front_escape_red']
Jloss_qe['bulk_recomm'] = np.dot((100 - self.EQE[idx_junc:]) / 100,
AM15G_Jph[idx_junc:]) \
- Jloss['front_escape_red'] \
- Jloss_qe['parasitic_absorption']
Jloss_qe['blue_loss'] = np.dot((100 - self.EQE[:idx_junc]) / 100,
AM15G_Jph[:idx_junc]) \
- Jloss['front_escape_blue']
self.Jloss_qe = Jloss_qe
# print(Jloss_qe)
def plot_EQE(self, ax):
line_EQE = ax.plot(self.wl, self.EQE, '-o', label='EQE')
ax.set_xlabel('Wavelength [$nm$]')
ax.set_ylabel('QE [%]')
ax.legend(loc='best')
ax.grid(True)
return line_EQE # xxx currently not working
def plot_IQE(self, ax):
ax.plot(self.wl, self.IQE, '-o', label='IQE')
ax.set_xlabel('Wavelength [$nm$]')
ax.set_ylabel('QE [%]')
ax.legend(loc='best')
ax.grid(True)
def plot_Jloss(self, ax):
waterfall(ax, list(self.Jloss_qe.values()), list(self.Jloss_qe.keys()))
def load(self, raw_data_file):
'''Loads EQE data into attributes'''
self.filepath = raw_data_file
self.filename = os.path.basename(raw_data_file)
# the other columns are ignored
data_array = np.genfromtxt(raw_data_file, usecols=(0, 1),
skip_header=1, skip_footer=8)
self.wl = data_array[:, 0]
self.EQE = data_array[:, 1]
f = open(raw_data_file, 'r')
d = {}
for line in f.readlines()[-7:-1]:
d.update(dict([line.strip('\n').split(':')]))
d['Jsc'] = round(float(d['Jsc']) / 1e3, 7)
self.output = d
class IVLight(object):
def __init__(self, fname):
self.load(fname)
def process(self, Rsh, Rs):
'''
Light IV calculations
        Calculates the ideal fill factors:
            FF_0
            FF_s
            FF_s_sh
        and the fill factor losses:
            FF_Rs
            FF_Rsh
FF_other
These are all stored within two dictionaries.
Inputs:
Rsh: The shunt resistance
Rs: The series resistance
Outputs:
None
'''
self.m = analysis.ideality_factor(
self.V, -1 * (self.J - self.output['Jsc']), Vth)
ideal_FF = OrderedDict()
ideal_FF['FF_0'] = analysis.ideal_FF(self.output['Voc'])
ideal_FF['FF_s'] = analysis.ideal_FF_series(self.output['Voc'],
self.output['Jsc'],
Rs)
ideal_FF['FF_s_sh'] = analysis.ideal_FF_series_shunt(self.output['Voc'],
self.output['Jsc'],
Rs, Rsh)
self.ideal_FF = ideal_FF
FF_loss = OrderedDict()
FF_loss['FF_0'] = analysis.ideal_FF(self.output['Voc'])
FF_loss['FF_Rs'] = analysis.FF_loss_series(self.output['Voc'],
self.output['Jsc'],
self.output['Jmp'],
Rs)
FF_loss['FF_Rsh'] = analysis.FF_loss_shunt(self.output['Voc'],
self.output['Jsc'],
self.output['Vmp'],
self.output['Jmp'],
Rs, Rsh)
# for waterfall plot
FF_loss['FF_other'] = (FF_loss['FF_0'] \
- self.output['FF'] \
- FF_loss['FF_Rs'] \
- FF_loss['FF_Rsh'])
self.FF_loss = FF_loss
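        # Note: the analysis.* helpers above are assumed to implement the
        # usual empirical fill-factor expressions, e.g. Green's approximation
        #   FF_0 = (v_oc - ln(v_oc + 0.72)) / (v_oc + 1),  v_oc = Voc / Vth,
        # but see the analysis module for the actual formulas used.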
def plot(self, ax):
'''
Plots the current voltage curve
inputs:
ax: A figure axes to which is plotted
'''
ax.plot(self.V, self.J, '-o', label='light IV')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Current Density [$A cm^{-2}$]')
ax.grid(True)
# ax.legend(loc='best')
def plot_m(self, ax):
# trims some noise at ends of array
ax.plot(self.V[10:-5], self.m[10:-5], '-o', label='Light IV')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Ideality Factor []')
ax.grid(True)
ax.legend(loc='best')
ax.set_ylim(ymin=0)
def plot_FF1(self, ax):
waterfall(ax, list(self.FF_loss.values()), list(self.FF_loss.keys()))
def load(self, raw_data_file):
'''Loads Light IV data into attributes'''
self.filepath = raw_data_file
self.filename = os.path.basename(raw_data_file)
f = open(raw_data_file, 'r')
d = OrderedDict()
# rows which contain floats in lightIV data file header
float_rows = [2]
float_rows.extend(list(range(6, 18)))
for i, line in enumerate(f.readlines()[1:19]):
# convert to float for future calculations
if i in float_rows:
key_temp, val = line.strip('\n').split(':\t')
key = key_temp.strip()
d[key] = float(val)
else:
d.update(dict([line.strip('\n').split(':\t')]))
data_array = np.genfromtxt(raw_data_file, skip_header=20)
self.V = data_array[:, 0]
self.J = data_array[:, 1] / d['Cell Area (sqr cm)']
self.output = d
class IVSuns(object):
filepath = None
filename = None
def __init__(self, fname):
self.load(fname)
def process(self):
'''Suns Voc calculations'''
self.m = analysis.ideality_factor(self.V, self.effsuns, Vth)
def plot_IV(self, ax):
ax.plot(self.V, self.J, '-o', label='suns Voc')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Current Density [$A cm^{-2}$]')
ax.grid(True)
ax.legend(loc='best')
ax.set_ylim(ymin=0)
def plot_tau(self, ax):
# TODO: trims off some noise, use better method?
ax.loglog(self.Dn[5:-5], self.tau_eff[5:-5], '-o',
label='Suns Voc')
ax.set_xlabel('$\Delta n$ [$cm^{-3}$]')
ax.set_ylabel(r'$\tau_{eff}$ [s]')
ax.grid(True)
ax.legend(loc='best')
# ax.set_xlim(xmin=1e11)
def plot_m(self, ax):
# trims some noise at ends of array
ax.plot(self.V[10:-5], self.m[10:-5], '-o', label='suns Voc')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Ideality Factor []')
ax.grid(True)
ax.legend(loc='best')
ax.set_ylim(ymin=0)
def plot_log_IV(self, ax):
# trims some noise at ends of array
# TODO: Link this to Jsc rather than this manual index
# check for real values
index = np.isfinite(self.J)
# find the meaured Jsc
Jsc_index = abs(self.V[index]) == np.min(abs(self.V[index]))
ax.plot(self.V, -1 * (
self.J - self.J[index][Jsc_index]), '-o', label='Suns Voc')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Ideality Factor []')
ax.grid(True)
ax.legend(loc='best')
def load(self, raw_data_file, text_format=False):
'''Loads Suns Voc data in attributes'''
self.filepath = raw_data_file
self.filename = os.path.basename(raw_data_file)
if text_format:
data_array = np.genfromtxt(raw_data_file, usecols=(0, 1, 2, 3, 4),
skip_header=1)
else:
# suppress annoying warning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
wb = openpyxl.load_workbook(raw_data_file, read_only=True,
data_only=True)
ws_RawData = wb.get_sheet_by_name('RawData')
ws_User = wb.get_sheet_by_name('User')
last_cell = 'J' + str(ws_RawData.max_row)
data_array = np.array([[i.value for i in j] for j in
ws_RawData['E2':last_cell]])
# try: ??
# np.asarray(xlSheet.Range("A9:I133").Value, dtype=np.float64)
params = [i.value for i in ws_User['A5':'F5'][0]]
vals = [i.value for i in ws_User['A6':'F6'][0]]
self.params = dict(zip(params, vals))
params = [i.value for i in ws_User['A8':'L8'][0]]
# Reduce 13 significant figures in .xlsx file to 6 (default of .format())
# vals = [float('{:f}'.format(i.value)) for i in
# ws_User['A6':'F6'][0]]
vals = [float('{:e}'.format(i.value))
for i in ws_User['A9':'L9'][0]]
self.output = dict(zip(params, vals))
self.effsuns = data_array[:, 0] # Effective Suns
self.V = data_array[:, 1]
self.J = data_array[:, 2]
self.P = data_array[:, 3]
self.Dn = data_array[:, 4]
self.tau_eff = data_array[:, 5]
class IVDark(object):
def __init__(self, fname):
self.load(fname)
def process(self):
'''
This performs the Dark IV calculations for loss analysis
        It currently calculates:
            the ideality factor as a function of voltage
'''
# Ideality factor
self.m = analysis.ideality_factor(self.V, self.J, Vth)
# Shunt resistance, at 30mV
# TODO: do linear fit with zero intercept?
Rsh = 0.03 / analysis.find_nearest(0.03, self.V, self.J)
return Rsh
def plot_log_IV(self, ax):
ax.semilogy(self.V, self.J, '-o', label='Dark IV')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Current Density [$A cm^{-2}$]')
ax.grid(True)
ax.legend(loc='best')
def plot_m(self, ax):
ax.plot(self.V, self.m, '-o', label='dark IV')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Ideality Factor []')
ax.grid(True)
ax.legend(loc='best')
def load(self, raw_data_file):
'''Loads Dark IV data in attributes'''
self.filepath = raw_data_file
self.filename = os.path.basename(raw_data_file)
f = open(raw_data_file, 'r')
d = OrderedDict()
# rows which contain floats in lightIV data file header
float_rows = [1, 6, 7, 8]
for i, line in enumerate(f.readlines()[1:10]):
# convert to float for future calculations
key, val = line.strip('\n').split(':\t')
if i in float_rows:
d[key] = float(val)
else:
d[key] = val
# d.update(dict(re.findall(r'([\s\S]+)\s*:\t([^\n]+)', line)))
# d.update(dict([line.strip('\n').split(':\t')]))
# for line in f.readlines()[1:10]:
# d.update(dict(re.findall(r'([\s\S]+)\s*:\t([^\n]+)', line)))
# d['Cell Area in sqr cm'] = float(d['Cell Area in sqr cm'])
self.output = d
data_array = np.genfromtxt(
raw_data_file, usecols=(0, 1), skip_header=11)
self.V = data_array[:, 0]
self.J = data_array[:, 1] / d['Cell Area in sqr cm']
class Cell(object):
def __init__(self, thickness=None, **kwargs):
self.thickness = thickness # [cm]
self.sample_names = {}
self.input_errors = {}
self.refl = Refl(kwargs['reflectance_fname'])
self.qe = QE(kwargs['EQE_fname'])
self.sunsVoc = IVSuns(kwargs['suns Voc_fname'])
self.div = IVDark(kwargs['dark IV_fname'])
self.liv = IVLight(kwargs['light IV_fname'])
self.example_dir = os.path.join(os.pardir, 'example_cell')
self.check_input_vals()
def check_input_vals(self):
'''
Check the input cell parameters are consistent between measurements.
Gives the error as a percentage.
'''
# sample names
self.sample_names['Light IV'] = self.liv.output['Cell Name ']
self.sample_names['Suns Voc'] = self.sunsVoc.params['Sample Name']
self.sample_names['Dark IV'] = self.div.output['Cell Name']
# Cell area
# tolerance = 1e-3
area_liv = self.liv.output['Cell Area (sqr cm)']
area_div = self.div.output['Cell Area in sqr cm']
delta = (area_div - area_liv) / area_liv
self.input_errors['Cell Area'] = delta
# thickness
self.thickness = self.sunsVoc.params['Wafer Thickness (cm)']
tck_user_input = self.thickness
tck_sunsVoc = self.sunsVoc.params['Wafer Thickness (cm)']
delta = (tck_sunsVoc - tck_user_input) / tck_user_input
self.input_errors['Cell thickness'] = delta
# Voc
Voc_liv = self.liv.output['Voc']
Voc_div = self.sunsVoc.output['Voc (V)']
delta = (Voc_div - Voc_liv) / Voc_liv
self.input_errors['Voc'] = delta
# Jsc
Jsc_liv = self.liv.output['Jsc']
Jsc_iqe = self.qe.output['Jsc']
delta = (Jsc_iqe - Jsc_liv) / Jsc_liv
self.input_errors['Jsc'] = delta
# some checks on the data
assert abs(self.input_errors['Cell Area']
) < 0.01, "Provided sample area's disagrees: {0:.1f} cm^2 {1:.1f} cm^2".format(area_liv, area_div)
assert abs(self.input_errors['Cell thickness']
) < 0.01, "Provided sample thickness disagrees: {0:.4f} cm {1:.4f} cm".format(tck_user_input, tck_sunsVoc)
assert abs(self.input_errors['Voc']
) < 0.01, "Provided Voc disagree: {0:.0f} mV {1:.0f} mV".format(Voc_liv * 1000, Voc_div * 1000)
assert abs(self.input_errors['Jsc']
) < 0.1, "Provided Jsc disagree: {0:.0f} mA {1:.0f} mA".format(Jsc_liv * 1000, Jsc_iqe * 1000)
def collect_outputs(self):
'''Collects input and output parameters into self.output_list'''
output_list = []
def quick_print(key, val):
output_list.append('{:>30}, {:<20}'.format(key, val))
output_list.append('\n')
quick_print('##### Inputs check: Percentage difference', '',)
for key, val in self.sample_names.items():
quick_print(key, val)
for key, val in self.input_errors.items():
quick_print(key, '{:.3e}%'.format(val * 100))
output_list.append('\n')
quick_print('##### Reflectance', '')
quick_print('filename', self.refl.filename)
output_list.append('\n')
quick_print('##### QE', '')
quick_print('filename', self.qe.filename)
for key, val in self.qe.output.items():
quick_print(key, val)
quick_print('Basore fit Leff', '{:.3e}'.format(
self.qe.output_Basore_fit['Leff']))
quick_print('Basore fit eta_c', '{:.3f}'.format(
self.qe.output_Basore_fit['eta_c']))
output_list.append('\n')
quick_print('##### Light IV', '')
quick_print('filename', self.liv.filename)
for key, val in self.liv.output.items():
quick_print(key, val)
output_list.append('\n')
quick_print('##### Suns Voc', '')
quick_print('filename', self.sunsVoc.filename)
for key, val in self.sunsVoc.params.items():
quick_print(key, val)
for key, val in self.sunsVoc.output.items():
quick_print(key, val)
output_list.append('\n')
quick_print('##### Dark IV', '')
quick_print('filename', self.div.filename)
for key, val in self.div.output.items():
quick_print(key, val)
output_list.append('\n')
        quick_print('##### Calculated', '')
quick_print('### Reflectance', '')
quick_print('AR', '{:.3f}'.format(self.refl.AR))
quick_print('WAR', '{:.3f}'.format(self.refl.WAR))
quick_print('f_metal', '{:.3f}'.format(self.refl.f_metal))
quick_print('### Parasitic resistances', '')
quick_print('Rsh (Ohm cm2)', '{:.3e}'.format(self.Rsh))
quick_print('Rs1 (Ohm cm2)', '{:.3e}'.format(self.Rs_1))
quick_print('Rs2 (Ohm cm2)', '{:.3e}'.format(self.Rs_2))
quick_print('### Current losses', '')
for key, val in self.qe.Jloss_qe.items():
quick_print(key + ' (mA)', '{:.3f}'.format(val))
quick_print('### Fill Factor', '')
for key, val in self.liv.ideal_FF.items():
quick_print(key, '{:.3f}'.format(val))
self.output_list = output_list
def print_output_to_file(self):
filename = self.cell_name + '_loss_analysis_summary.csv'
output_file = open(os.path.join(self.output_dir, filename), 'w')
for item in self.output_list:
output_file.write(item + '\r\n')
output_file.close()
def plot_all(self, save_fig_bool):
'''Plot the output of previous calculations'''
# for reflectance
fig_QE = plt.figure('QE', figsize=(30 / 2.54, 15 / 2.54))
fig_QE.clf()
ax_refl = fig_QE.add_subplot(2, 2, 1)
ax_QE = fig_QE.add_subplot(2, 2, 2)
ax_QE_fit = fig_QE.add_subplot(2, 2, 3)
ax_QE_layered = fig_QE.add_subplot(2, 2, 4)
self.refl.plot(ax_refl)
self.refl.plot(ax_QE)
self.qe.plot_EQE(ax_QE)
self.qe.plot_IQE(ax_QE)
# for light and dark IV
fig_IV = plt.figure('IV', figsize=(30 / 2.54, 15 / 2.54))
fig_IV.clf()
# get the plotting axes
ax_logIV = fig_IV.add_subplot(2, 2, 1)
ax_ideality = fig_IV.add_subplot(2, 2, 3)
ax_lightIV = fig_IV.add_subplot(2, 2, 2)
ax_tau = fig_IV.add_subplot(2, 2, 4)
        # plot light IV first, as it is typically the noisiest
self.liv.plot_m(ax_ideality)
self.liv.plot(ax_lightIV)
# plot suns Voc
self.sunsVoc.plot_m(ax_ideality)
self.sunsVoc.plot_IV(ax_lightIV)
self.sunsVoc.plot_tau(ax_tau)
self.sunsVoc.plot_log_IV(ax_logIV)
        # plot dark IV last, as it is the least noisy
self.div.plot_log_IV(ax_logIV)
self.div.plot_m(ax_ideality)
# plot the EQE fitted data
self.qe.plot_Basore_fit(ax_QE_fit)
        # build the layered loss plot by shading the areas between the
        # reflectance curves and the EQE
dummy_ones = np.ones(len(self.refl.wl))
ax_QE_layered.fill_between(self.refl.wl, dummy_ones * 100,
100 - dummy_ones * self.refl.f_metal, color='blue')
ax_QE_layered.fill_between(self.refl.wl,
100 - dummy_ones * self.refl.f_metal,
100 - self.refl.refl_wo_escape, color='green')
ax_QE_layered.fill_between(self.refl.wl, 100 - self.refl.refl_wo_escape,
100 - self.refl.refl, color='red')
ax_QE_layered.fill_between(self.refl.wl, 100 - self.refl.refl,
self.qe.EQE_xxx_unnamed, color='cyan')
# ax_QE_layered.plot(self.refl.wl, self.qe.EQE_xxx_unnamed)
ax_QE_layered.fill_between(self.refl.wl, self.qe.EQE_xxx_unnamed,
self.qe.EQE, color='magenta')
# line_EQE, = self.qe.plot_EQE(ax_QE_layered)
# line_EQE.set_marker('x')
# self.refl.plot_QE(ax_QE_layered)
# for loss analysis summary
fig_LA = plt.figure('LA', figsize=(30 / 2.54, 15 / 2.54))
fig_LA.clf()
ax_FF = fig_LA.add_subplot(2, 2, 1)
ax_Jloss = fig_LA.add_subplot(2, 2, 2)
self.liv.plot_FF1(ax_FF)
self.qe.plot_Jloss(ax_Jloss)
fig_QE.set_tight_layout(True)
fig_IV.set_tight_layout(True)
if save_fig_bool:
fig_QE.savefig(os.path.join(self.output_dir,
self.cell_name + '_QE.png'))
fig_IV.savefig(os.path.join(self.output_dir,
self.cell_name + '_IV.png'))
plt.show()
def process_all(self, save_fig_bool, output_dir, cell_name):
'''
A function that calls all the processing functions.
'''
if cell_name == '':
self.cell_name = self.liv.output['Cell Name ']
else:
self.cell_name = cell_name
self.output_dir = output_dir
self.sunsVoc.process()
self.refl.process()
self.qe.process(self.refl.wl, self.refl.refl, self.refl.refl_wo_escape,
self.refl.Jloss)
self.Rsh = self.div.process()
self.Rs_1 = analysis.Rs_calc_1(self.liv.output['Vmp'],
self.liv.output['Jmp'],
self.sunsVoc.V, self.sunsVoc.J)
self.Rs_2 = analysis.Rs_calc_2(self.liv.output['Voc'],
self.liv.output['Jsc'],
self.liv.output['FF'],
self.sunsVoc.output['PFF'])
self.liv.process(self.Rsh, self.Rs_1)
self.collect_outputs()
self.print_output_to_file()
self.plot_all(save_fig_bool)
if __name__ == "__main__":
example_dir = os.path.join(os.pardir, 'example_cell')
files = {
'reflectance_fname': os.path.join(example_dir, 'example_reflectance.csv'),
'EQE_fname': os.path.join(example_dir, 'example_EQE.txt'),
'light IV_fname': os.path.join(example_dir, 'example_lightIV.lgt'),
'suns Voc_fname': os.path.join(example_dir, 'example_sunsVoc.xlsm'),
'dark IV_fname': os.path.join(example_dir, 'example_darkIV.drk')}
cell1 = Cell(**files)
    # process_all requires three arguments; these example values are assumptions
    cell1.process_all(save_fig_bool=True, output_dir=example_dir, cell_name='')
| mit |
jkibele/OpticalRS | OpticalRS/GroundTruthShp.py | 1 | 34647 | """
I'm copying this from some of my much older work. It's probably going to be
buggy. Yep, it was. I've done some refactoring and whatnot but it could probably
use some more. Maybe combine with `GeoDFUtils`? It would be nice to be able to
easily operate on GDF subsets and selections.
`buffer` and `rasterize` are working pretty well. Still need tests and whatnot
but those methods are the main use for this module at the moment. The two
`error_matrix` functions are useful and working too.
"""
#from error_matrix import *
from RasterDS import RasterDS
from ErrorMatrix import ErrorMatrix
from scipy.stats.stats import mode
import numpy as np
import pandas as pd
import geopandas as gpd
import os
import sys
from osgeo import ogr, gdal, osr
import shapely as shpl
from scipy.stats import mode as scipymode
from tempfile import mkdtemp
import shutil
class GroundTruthGDF(gpd.GeoDataFrame):
def __init__(self, *args, **kwargs):
hf = kwargs.pop('habfield', 'habitat')
hc = kwargs.pop('habcodefield', 'hab_num')
super(GroundTruthGDF, self).__init__(*args, **kwargs)
self.habfield = hf
self.habcodefld = hc
@classmethod
def new(cls,*args,**kwargs):
return cls(*args,**kwargs)
@classmethod
def from_file(cls, filename, **kwargs):
hf = kwargs.pop('habfield', 'habitat')
hc = kwargs.pop('habcodefield', 'hab_num')
gdf = gpd.io.file.read_file(filename, **kwargs)
return cls(gdf, habfield=hf, habcodefield=hc)
@property
def codes_habitat(self):
"""
Return a dictionary just like habitat_codes only backwards.
"""
hf = self.habfield
hcf = self.habcodefld
hcd = dict()
for cl in self[hcf].unique():
if cl > 0:
sers = self[self[hcf]==cl][hf]
if sers.count() > 1:
hcd[cl] = sers.mode().item()
elif sers.count() > 0:
hcd[cl] = sers.item()
return hcd
def __getitem__(self, key):
result = super(GroundTruthGDF, self).__getitem__(key)
if isinstance(result, gpd.GeoDataFrame):
result.__class__ = GroundTruthGDF
result.habfield = self.habfield
result.habcodefld = self.habcodefld
return result
def query(self, expr, inplace=False, **kwargs):
result = super(GroundTruthGDF, self).query(expr, inplace=False, **kwargs)
if isinstance(result, gpd.GeoDataFrame):
result.__class__ = GroundTruthGDF
result.habfield = self.habfield
result.habcodefld = self.habcodefld
return result
def comparison_df(self, rds, radius=0, generous=False, band_index=0,
out_of_bounds=np.nan, with_unclassed=False):
"""
There can be problems if there are codes in the raster that do not exist
in the geodataframe. I should probably check for this condition and
raise an exception. No time right now.
"""
pred = self.compare_raster(rds, radius=radius, generous=generous,
band_index=band_index,
out_of_bounds=out_of_bounds)
truth = self.__getitem__(self.habcodefld)
truth.name = 'truth'
pred.name = 'pred'
preddf = pd.concat((truth, pred), axis=1)
if not with_unclassed:
# Get rid of any row that has a zero in it
preddf = preddf[(preddf!=0).all(1)]
return preddf
def error_matrix(self, rds, radius=0, generous=False, band_index=0,
out_of_bounds=np.nan, with_unclassed=False):
from sklearn.metrics import confusion_matrix
compdf = self.comparison_df(rds, radius=radius, generous=generous,
band_index=band_index,
out_of_bounds=out_of_bounds,
with_unclassed=with_unclassed).dropna()
# scikit-learn returns pred on x and true on y. I want it the other
# way around so .T
em = confusion_matrix(compdf.truth, compdf.pred).T.view(ErrorMatrix)
codes = np.sort(np.unique(compdf.dropna()))
em.categories = map(lambda s: self.codes_habitat.get(s, "Unclassified"),
codes)
return em
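    # Usage sketch (not part of the original module): building an error matrix
    # from a ground truth GeoDataFrame and a classified raster. The file names
    # and the 2 m radius are assumptions for illustration only.
    #
    #     gdf = GroundTruthGDF.from_file('ground_truth.shp', habfield='habitat',
    #                                    habcodefield='hab_num')
    #     rds = RasterDS('habitat_map.tif')
    #     em = gdf.error_matrix(rds, radius=2.0, generous=True)
    #     print(em)  # rows are map classes, columns are reference classes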
def compare_raster(self, rds, radius=0, generous=False, band_index=0,
out_of_bounds=np.nan):
"""
Compare habitat codes in `gdf` with codes in corresponding locations of
a raster habitat map (`rds`). This can be an exact point to point
comparison (when `radius`=0) or can be more forgiving. When `radius`>0
and `generous` is `False`, the mode (most common) value within `radius`
of each point will be returned. When `radius`>0 and `generous` is True,
ground truth habitat codes will be returned if found within `radius` of
each point, and the mode will be returned if not.
Parameters
----------
rds : OpticalRS.RasterDS
The habitat map (or whatever raster) you want to compare to the
`GroundTruthShapefile` (self). The projection of this raster must
match the projection of the `GroundTruthShapefile`. If it doesn't
match, you might get results but they'll be wrong.
radius : float
The radius with which to buffer `point`. The units of this value
depend on the projection being used.
generous : boolean
If False (default), mode will be returned. If True, habitat code will be
returned if within `radius`. See function description for more info.
band_index : int
Index of the image band to sample. Zero indexed (band 1 = 0). For
single band rasters, this should be left at the default value (0).
out_of_bounds : float, int, or nan (default)
If `point` is not within `self.raster_extent`, `out_of_bounds` will
be returned.
Returns
-------
pandas Series
The values from `rds` that correspond to each point in `gdf`.
"""
column = self.habcodefld
if generous:
rcheck = lambda row: rds.radiused_point_check(row.geometry,
radius=radius,
search_value=row[column],
band_index=band_index,
out_of_bounds=out_of_bounds)
else:
rcheck = lambda row: rds.radiused_point_check(row.geometry,
radius=radius,
search_value=None,
band_index=band_index,
out_of_bounds=out_of_bounds)
return self.apply(rcheck, axis=1)
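    # Usage sketch, with `gdf` and `rds` as in the sketch above: an exact
    # point-under-pixel check versus a forgiving radiused check. The 'hab_num'
    # column name is the assumed default code field.
    #
    #     exact = gdf.compare_raster(rds)                        # value under each point
    #     lenient = gdf.compare_raster(rds, radius=2.0, generous=True)
    #     (exact == gdf['hab_num']).mean()                       # fraction of exact matches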
class GroundTruthShapefile(object):
"""
This class contains code for relating point ground truth shapefiles (such as
the ones generated by Benthic Photo Survey) to raster maps. The default
values (for `habfield` and `habcodefield`) assume that there's a field
called habitat that contains a text description of the habitat class for
each point.
"""
def __init__(self, file_path, habfield='habitat', habcodefield='hab_num'):
self.habfield = habfield
self.habcodefld = habcodefield
self.file_path = file_path
self.ds = open_shapefile(self.file_path)
self.hab_dict = self.__setup_hab_dict()
self.legit_habs = sorted( [ h for h in self.habitats if h ] ) # Exclude None as a habitat value
self.habitat_codes = self.__setup_hab_codes() # dict( zip( legit_habs, range( 1, len(legit_habs) + 1 ) ) )
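    # Minimal construction sketch (the file name is an assumption): the habitat
    # field names default to those written by Benthic Photo Survey.
    #
    #     gts = GroundTruthShapefile('ground_truth.shp')
    #     gts.habitats          # sorted habitat names found in the shapefile
    #     gts.habitat_codes     # habitat name -> numeric code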
def __setup_hab_dict(self):
"""
The hab_dict is a dictionary that contains a list of ogr features for
each habitat key.
"""
hab_dict = {}
for hab in self.habitats:
hab_dict[hab] = [f for f in self.features if f.__getattr__(self.habfield)==hab]
return hab_dict
def __setup_hab_codes(self):
"""
There should be habitat codes in the shapefile in a field called
hab_num. We need to get them and set up the matching names. This only
works for BPS shapefiles with a hab_num field set up to match the
habitat field. If `self.habfield` is set to something else, we'll just
generate integer codes.
"""
# Exclude None from list of habitats
hcd = {}
if self.habcodefld is not None:
for hab in self.legit_habs:
feat = self.hab_dict[hab][0]
hcd[hab] = feat.__getattr__(self.habcodefld)
else:
for i, hab in enumerate(self.legit_habs):
hcd[hab] = i+1 # +1 to make it not zero indexed
return hcd
@property
def features(self):
fts = [f for f in self.ds.GetLayer()]
self.ds.GetLayer().ResetReading()
return fts
@property
def habitats(self):
habs = sorted( set([f.__getattr__(self.habfield) for f in self.features]))
return habs
@property
def legit_habs_code_sorted(self):
"""
Return the legit habitats sorted by order of their numeric codes.
"""
return [v for k,v in sorted(self.codes_habitat.items())]
@property
def geo_data_frame(self):
"""
Return a GeoPandas GeoDataFrame object.
"""
gtgdf = GroundTruthGDF.from_file(self.file_path, habfield=self.habfield,
habcodefield=self.habcodefld)
# gtgdf = gpd.GeoDataFrame.from_file(self.file_path)
return gtgdf
def geopandas_subset(self, query, file_name=None):
"""
Create a `GroundTruthShapefile` based on a geopandas subset of
`self.geo_data_frame`. If `file_name` is `None` (default), then the file
will only be temporarily saved. It will be deleted before this function
returns. This seems to work fine for generating error matrices from
subsets but it could have unintended consequences elsewhere. If you
provide a `file_name`, a shapefile will be saved from the output.
Parameters
----------
query : string or pandas Series
If `query` is a string, `pandas.DataFrame.query` will be used to
generate the subset. Otherwise, query is assumed to be a series that
can be used to index `self.geo_data_frame`.
file_name : string file path or None
If `None`, a temporary shapefile will be created and immediately
deleted. Otherwise, the subset will be saved as a shapefile.
Returns
-------
GroundTruthShapefile
A `GroundTruthShapefile` object containing only the selected subset
of features.
"""
if file_name is None:
tdir = mkdtemp()
tfn = os.path.join(tdir, 'temp.shp')
else:
tfn = file_name
if type(query) is str:
gdf = self.geo_data_frame.query(query)
else:
gdf = self.geo_data_frame[query]
# save the subset to a file
gdf.to_file(tfn)
# make a new GroundTruthShapefile
gts = GroundTruthShapefile(tfn, self.habfield, self.habcodefld)
if file_name is None:
shutil.rmtree(tdir)
return gts
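    # Usage sketch (the query string assumes a 'depth' attribute exists in the
    # shapefile): build a GroundTruthShapefile holding only shallow points.
    #
    #     subset = gts.geopandas_subset("depth < 10", file_name='shallow_points.shp')
    #     subset.hab_dict_counts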
@property
def spatial_reference(self):
"""
Return the OGR spatial reference object for the shapefile.
"""
return self.ds.GetLayer().GetSpatialRef()
@property
def projection_wkt(self):
"""
Return the well known text (WKT) representation of the shapefile's projection.
"""
return self.spatial_reference.ExportToWkt()
@property
def projcs(self):
"""
Return the PROJCS value from the shapefile's spatial reference. This is
basically the name of the projection. ...I think.
"""
return self.spatial_reference.GetAttrValue('PROJCS')
@property
def geometry_type(self):
"""
Just return whether it's a type of point, line, or polygon.
"""
type_name = ogr.GeometryTypeToName( self.ds.GetLayer().GetGeomType() ).lower()
        if type_name.find('point') != -1:
            return 'point'
        elif type_name.find('line') != -1:
            return 'line'
        elif type_name.find('polygon') != -1:
            return 'polygon'
else:
return None
@property
def hab_colors(self):
"""
return a dictionary with hab codes as keys and hab colors as values.
"""
legit_habs = sorted( [ h for h in self.habitats if h ] )
hcd = {}
for hab in legit_habs:
feat = self.hab_dict[hab][0]
hcd[hab] = feat.hab_color
return hcd
@property
def codes_habitat(self):
"""
Return a dictionary just like habitat_codes only backwards.
"""
chd = {}
for k,v in self.habitat_codes.items():
chd[v] = k
return chd
@property
def qgis_vector(self):
        # Note: QgsVectorLayer comes from qgis.core and is not imported in this
        # module; this property assumes it is available (e.g. when running
        # inside the QGIS Python environment).
        qvl = QgsVectorLayer(self.file_path, 'grnd_truth', 'ogr')
        if qvl.isValid():
            return qvl
        else:
            raise Exception("Failed to create a QGis Vector Layer. QGis provider path problems, perhaps?")
def buffer(self, radius=1.0, file_path=None):
"""
Buffer the geometries in `self` and return a new `ogr` datasource. If
`file_path` is `None`, just create the datasource in memory. If a file
path is given, write out a shapefile. All fields and values (aside from
geometry) are cloned.
"""
if file_path == None:
drvname = 'Memory'
else:
drvname = 'ESRI Shapefile'
srcds = self.ds
# get projection
lyr = srcds.GetLayer(0)
sptrf = lyr.GetSpatialRef()
proj = osr.SpatialReference()
proj.ImportFromWkt(sptrf.ExportToWkt())
drv = ogr.GetDriverByName(drvname)
if file_path == None:
dst_ds = drv.CreateDataSource('out')
elif os.path.exists(file_path):
raise Exception("{} already exists!".format(file_path))
else:
dst_ds = drv.CreateDataSource(file_path)
dst_lyr = dst_ds.CreateLayer('', srs=proj, geom_type=ogr.wkbPolygon)
# copy all the fields to the destination ds
featr = lyr.GetFeature(0)
nfields = featr.GetFieldCount()
for i in range(nfields):
fld = featr.GetFieldDefnRef(i)
dst_lyr.CreateField(fld)
feat_defn = dst_lyr.GetLayerDefn()
# reset the feature counter
lyr.ResetReading()
# buffer the geometries and copy the fields
for i in range(lyr.GetFeatureCount()):
# get the feature and geometry
feat = lyr.GetFeature(i)
geom = feat.GetGeometryRef()
# create a new feature
newfeat = feat.Clone()
# get the buffered geometry
bufgeom = geom.Buffer(radius)
# set the new geometry to the buffered geom
newfeat.SetGeometry(bufgeom)
# add the new feature to the destination layer
dst_lyr.CreateFeature(newfeat)
# clean up
newfeat.Destroy()
feat.Destroy()
# ensure the new features are written
dst_lyr.SyncToDisk()
return dst_ds
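    # Usage sketch: buffer every ground truth point by 2 m (this assumes the
    # projection units are metres) and write the polygons to a new shapefile.
    #
    #     buffered_ds = gts.buffer(radius=2.0, file_path='ground_truth_buffered.shp')
    #     buffered_ds = None  # release the OGR datasource so the file is flushed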
def rasterize(self, buffer_radius=None, raster_template=None,
pixel_size=1.99976, value_field='hab_num', float_values=False,
array_only=False, out_file_path=None):
"""
Return a raster that can be used for classification training.
buffer_radius: A float value in projection units to buffer the
geometries by. If buffer_radius is left None then only pixels right
under points will be classified.
raster_template: A RasterDS object. If supplied, the resulting
rasterized image will have the same extent and geotransform as the
template. Also, if a raster_template is provided, the pixel_size keyword
value will be ignored and pixel size will come from the template.
pixel_size: A float value representing pixel size in projection units.
This value will be ignored if a raster_template is supplied.
value_field: A string representing the name of the field in the
shapefile that holds the numeric code that will be burned into the
raster output as the pixel value.
float_values: Boolean. If `True`, the output raster will contain floats.
If `False`, the output will be integers. Default is `False`.
array_only: A boolean. If true we'll try to just write the raster to
memory and not to disk. If you don't need to keep the raster, this will
just keep you from having to clean up useless files later. Then we'll
just return an array instead of GroundTruthRaster object.
out_file_path: String. Path to the raster file output. If `None`
(default) and `array_only=False`, a file name based on the
`GroundTruthShapefile` file name will be created. If `array_only=True`,
`out_file_path` is ignored.
"""
if float_values:
datatype = gdal.GDT_Float32
else:
datatype = gdal.GDT_Byte
# Make a copy of the layer's data source because we'll need to
# modify its attributes table
if buffer_radius:
source_ds = ogr.GetDriverByName("Memory").CopyDataSource( self.buffer(radius=buffer_radius), "" )
else:
source_ds = ogr.GetDriverByName("Memory").CopyDataSource( self.ds, "")
source_layer = source_ds.GetLayer(0)
source_srs = source_layer.GetSpatialRef()
if raster_template:
gTrans = raster_template.gdal_ds.GetGeoTransform()
pixsizeX = gTrans[1]
pixsizeY = gTrans[5]
x_res = raster_template.gdal_ds.RasterXSize
y_res = raster_template.gdal_ds.RasterYSize
rdsarr = raster_template.band_array
# if np.ma.is_masked(rdsarr):
# mask = rdsarr[...,0].mask
# else:
# mask = None
else:
x_min, x_max, y_min, y_max = source_layer.GetExtent()
# Create the destination data source
x_res = int((x_max - x_min) / pixel_size)
y_res = int((y_max - y_min) / pixel_size)
if out_file_path:
targ_fn = out_file_path
else:
# make a target ds with filename based on source filename
targ_fn = self.file_path.rsplit(os.path.extsep, 1)[0] + '_rast' + os.path.extsep + 'tif'
# print "x_res: %i, y_res: %i" % (x_res,y_res)
target_ds = gdal.GetDriverByName('GTiff').Create(targ_fn, x_res, y_res, 1, datatype)
if raster_template:
# Use the raster template supplied so that we get the same extent as the raster
# we're trying to classify
target_ds.SetGeoTransform( gTrans )
else:
# None supplied so use the pixel_size value and the extent of the shapefile
target_ds.SetGeoTransform(( x_min, pixel_size, 0, y_max, 0, -pixel_size, ))
if raster_template:
target_ds.SetProjection( raster_template.gdal_ds.GetProjection() )
elif source_srs:
# Make the target raster have the same projection as the source
target_ds.SetProjection(source_srs.ExportToWkt())
else:
# Source has no projection (needs GDAL >= 1.7.0 to work)
target_ds.SetProjection('LOCAL_CS["arbitrary"]')
# Rasterize
err = gdal.RasterizeLayer(target_ds, [1], source_layer,
burn_values=[0],
options=["ATTRIBUTE=%s" % value_field])
if err != 0:
raise Exception("error rasterizing layer: %s" % err)
# clean up
source_layer = None
source_srs = None
source_ds = None
if array_only:
out_array = target_ds.ReadAsArray()
target_ds = None
os.remove( targ_fn )
return out_array
else:
target_ds = None
return RasterDS(targ_fn)
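    # Usage sketch (the raster file name is an assumption): burn buffered ground
    # truth codes into a raster aligned with the image being classified.
    #
    #     template = RasterDS('wv2_subset.tif')
    #     train_rds = gts.rasterize(buffer_radius=2.0, raster_template=template,
    #                               value_field='hab_num')
    #     train_arr = gts.rasterize(buffer_radius=2.0, raster_template=template,
    #                               array_only=True)  # ndarray only, no file kept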
def error_matrix(self, classification_ds, with_unclassed=False):
"""
Take a RasterDS (classification_ds) and create a user / producer
accuracy table. Return as an array so it can be displayed in multiple
ways. See the `ErrorMatrix` module for more information on the returned
object.
Parameters
----------
classification_ds : OpticalRS.RasterDS
The habitat map (or whatever raster) you want to compare to the
`GroundTruthShapefile` (self). The projection of this raster must
match the projection of the `GroundTruthShapefile`. If it doesn't
match, you might get results but they'll be wrong.
Returns
-------
ErrorMatrix
See the `ErrorMatrix` module for more information on the returned
object.
Notes
-----
This function should be merged in some way with `error_matrix_buffered`.
There's a bunch of redundancy between the two. I don't have time to do
it right now.
"""
maxcode = max(self.habitat_codes.values())
if with_unclassed:
maxcode += 1
errmat = np.zeros((maxcode, maxcode), int)
cats = list()
rext = classification_ds.raster_extent
for hab,code in self.habitat_codes.items():
for feature in self.hab_dict[hab]:
ref_val = code
geom = feature.geometry()
pnt = shpl.geometry.base.geom_from_wkb(geom.ExportToWkb())
if pnt.within(rext):
cls_val = classification_ds.value_at_point( geom )
else:
# this means that the point is not within the raster
# I think that means we don't want to count this point at
# all in the accuracy assessment.
continue
if with_unclassed:
errmat[ cls_val ][ ref_val ] += 1
elif cls_val == 0:
# If we're not including unclassified values
# we don't want this showing up in the totals.
continue
else:
errmat[ cls_val - 1 ][ ref_val - 1 ] += 1
# Get rid of all zero rows and columns. This can happen if hab codes
# skip an integer.
em = errmat.view( ErrorMatrix ).clean_zeros(with_unclassed)
# Rows and Columns of errmat end up sorted by hab code. This next line
# will give the habitat names sorted by hab code number.
if with_unclassed:
em.categories = ['Unclassified'] + sorted(self.habitat_codes, key=self.habitat_codes.get)
else:
em.categories = sorted(self.habitat_codes, key=self.habitat_codes.get)
return em
def error_matrix_buffered(self, classification_ds, radius=2.0, with_unclassed=False):
"""
Take a RasterDS (classification_ds) and create a user / producer
accuracy table. Ground Truth points will be buffered and matching
habitat codes within `radius` of a point will be considered success.
Return as an array so it can be displayed in multiple ways. See the
`ErrorMatrix` module for more information on the returned object.
Parameters
----------
classification_ds : OpticalRS.RasterDS
The habitat map (or whatever raster) you want to compare to the
`GroundTruthShapefile` (self). The projection of this raster must
match the projection of the `GroundTruthShapefile`. If it doesn't
match, you might get results but they'll be wrong.
radius : float
The radius with which to buffer points. The units of this value
depend on the projection being used. You can use
`GroundTruthShapefile.projection_wkt` to examine the projection and
find the units.
Returns
-------
ErrorMatrix
See the `ErrorMatrix` module for more information on the returned
object.
"""
maxcode = max(self.habitat_codes.values())
if with_unclassed:
maxcode += 1
errmat = np.zeros((maxcode, maxcode), int)
cats = list()
rext = classification_ds.raster_extent
for hab,code in self.habitat_codes.items():
for feature in self.hab_dict[hab]:
ref_val = code
geom = feature.geometry()
pnt = shpl.geometry.base.geom_from_wkb(geom.ExportToWkb())
if pnt.within(rext):
clsarr = classification_ds.geometry_subset(pnt.buffer(radius),
all_touched=True)
else:
# this means that the point is not within the raster
# I think that means we don't want to count this point at
# all in the accuracy assessment.
continue
if ref_val in clsarr.compressed():
cls_val = ref_val # this counts as success
elif not pnt.within(rext):
# this means that the point is not within the raster
# I think that means we don't want to count this point at
# all in the accuracy assessment.
continue
else:
# our reference value was not found within radius of point
# so we'll report it as the most common class within radius
if len(clsarr.compressed()) == 0:
cls_val = 0 # Assuming zero is code for unclassified
else:
cls_val = scipymode(clsarr.compressed()).mode.item()
if with_unclassed:
errmat[ cls_val ][ ref_val ] += 1
elif cls_val == 0:
# If we're not including unclassified values
# we don't want this showing up in the totals.
continue
else:
errmat[ cls_val - 1 ][ ref_val - 1 ] += 1
# Get rid of all zero rows and columns. This can happen if hab codes
# skip an integer.
em = errmat.view( ErrorMatrix ).clean_zeros(with_unclassed)
# Rows and Columns of errmat end up sorted by hab code. This next line
# will give the habitat names sorted by hab code number.
if with_unclassed:
em.categories = ['Unclassified'] + sorted(self.habitat_codes, key=self.habitat_codes.get)
else:
em.categories = sorted(self.habitat_codes, key=self.habitat_codes.get)
return em
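    # Usage sketch: compare strict and buffered accuracy assessment of the same
    # classification. `classified_rds` stands for a RasterDS of the habitat map,
    # and the 2 m radius is an assumption; use your projection's units.
    #
    #     em_strict = gts.error_matrix(classified_rds)
    #     em_lenient = gts.error_matrix_buffered(classified_rds, radius=2.0)
    #     em_lenient.categories   # habitat names sorted by numeric code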
@property
def hab_dict_counts(self):
ret_dict = {}
for hab in self.habitats:
ret_dict[hab] = len( self.hab_dict[hab] )
return ret_dict
def add_raster_values(self, raster_ds):
"""
The raster data source here is assumed to be a classified image. The raster
values should correspond to classes.
"""
trans = transform_dict(raster_ds)
band = raster_ds.GetRasterBand(1)
self.features = [ add_raster_value(f,trans,band) for f in self.ds.GetLayer() ]
self.ds.GetLayer().ResetReading()
self.hab_dict = self.__setup_hab_dict()
@property
def unsupervised_habitat_class_dict(self):
"""
For each habitat, give a list of raster values that correspond to the ground truth
points of that habitat type. This will be used with unsupervised classifications to
figure out which, if any, of the classes correspond to particular habitat types.
"""
try:
hcd = {}
for hab in self.habitats:
hcd[hab] = [ f.raster_value for f in self.hab_dict[hab] ]
except AttributeError:
raise AttributeError("Features need to be assigned raster values before you can create a habitat class dictionary.")
return hcd
@property
def unsupervised_habitat_class_modes(self):
hcm = {}
for hab in self.habitats:
md, cn = mode( self.unsupervised_habitat_class_dict[hab] )
if len( md )==1:
hcm[hab] = md[0]
else:
hcm[hab] = None
return hcm
def __output_training_LAN(self,img,buffer_radius=3.5,driver_str='LAN'):
"""
DEPRICATED! -> This only works for points. I think I can use the
rasterize method instead. I need to verify and then get rid of this
method. This method also has the habitat field hard coded (search for
feat.habitat). That would need to be changed to
feat.__getattr__(self.habfield) to make this work correctly.
Create a raster input for supervised classifications. img is the image
that we want to classify (in the form of a gdal datasource). Spectral
can't use tifs so we will create LAN file.
A buffer radius of 3.5 meters gives us 3 x 3 sets of pixels with our
point feature in the center. This, of course, assumes that we're dealing
with WV2 imagery and a projection with meters as the units. This works
        for me on my project but might not work for others.
"""
if driver_str=='LAN':
f_ext = 'lan'
elif driver_str=='GTiff':
f_ext = 'tif'
else:
raise ValueError("At this point, the output_training_LAN method only knows how to deal with LAN and GTiff file types. Sorry.")
lyr = self.ds.GetLayer()
lyr.ResetReading()
trans = transform_dict(img)
driver = gdal.GetDriverByName(driver_str)
rows = img.RasterYSize
cols = img.RasterXSize
fname = img.GetDescription().rsplit(os.path.extsep)[0] + '_train' + os.path.extsep + f_ext
add_num = 0
while os.path.exists(fname):
add_num += 1
if add_num==1:
fname = fname.replace( os.path.extsep + f_ext, '_%i' % add_num + os.path.extsep + f_ext )
else:
old = '_%i.%s' % ( add_num - 1, f_ext )
new = '_%i.%s' % ( add_num, f_ext )
fname = fname.replace( old, new )
        outDs = driver.Create(fname, cols, rows, 1, gdal.GDT_Int16)
if outDs is None:
print 'Could not create %s' % fname
sys.exit(1)
outBand = outDs.GetRasterBand(1)
pixel_count = 0
hab_pix_count = dict( zip( [h for h in self.habitats if h], np.zeros( len([h for h in self.habitats if h]), dtype=np.int ) ) )
for feat in lyr:
if not feat.habitat:
continue
if self.hab_dict_counts[feat.habitat] < 24:
continue
if buffer_radius:
geom = feat.geometry().Buffer(buffer_radius)
elp = envelope_dict(geom)
xtop = elp['xLeft']
ytop = elp['yTop']
xOffset = int( (xtop - trans['originX']) / trans['pixWidth'] )
yOffset = int( (ytop - trans['originY']) / trans['pixHeight'] )
xdist = elp['xRight'] - elp['xLeft']
ydist = elp['yBottom'] - elp['yTop']
cols = int( xdist / trans['pixWidth'] )
rows = int( ydist / trans['pixHeight'] )
pixarr = int( self.habitat_codes[feat.habitat] ) * np.ones((rows,cols), dtype=np.int16)
else:
geom = feat.geometry()
xOffset = int( (geom.GetX() - trans['originX']) / trans['pixWidth'] )
yOffset = int( (geom.GetY() - trans['originY']) / trans['pixHeight'] )
pixarr = np.array( [[ self.habitat_codes[feat.habitat] ]] )
outBand.WriteArray(pixarr,xOffset,yOffset)
pixel_count += pixarr.size
hab_pix_count[feat.habitat] += pixarr.size
outBand.FlushCache()
outBand.SetNoDataValue(0)
# georeference the image and set the projection
outDs.SetGeoTransform(img.GetGeoTransform())
outDs.SetProjection(img.GetProjection())
# build pyramids
gdal.SetConfigOption('HFA_USE_RRD', 'YES')
outDs.BuildOverviews(overviewlist=[2,4,8,16,32,64,128])
print "%i pixels total" % pixel_count
for hab in self.habitats:
if hab:
print "%i pixels for %s" % ( hab_pix_count[hab], hab )
return GroundTruthRaster( outDs.GetDescription() )
def training_classes(self, rds, buffer_radius=None,calc_stats=0):
"""
I think I should move some of this functionality over to the GroundTruthRaster class
in common.py. I'm generating classes okay from what I can tell but I get a singular
matrix error when I try to run the Gaussian Classifier. I have no idea why. Baffled,
I am.
"""
grnd_truth = self.rasterize(buffer_radius=buffer_radius,raster_template=rds,array_only=True)
sp_img = rds.spy_image.load()
return sp.create_training_classes(sp_img, grnd_truth,calc_stats=calc_stats)
def add_raster_value(feature, trans, band ):
geom = feature.geometry()
x = geom.GetX()
y = geom.GetY()
xOffset = int( (x - trans['originX']) / trans['pixWidth'] )
yOffset = int( (y - trans['originY']) / trans['pixHeight'] )
data = band.ReadAsArray(xOffset, yOffset, 1, 1)
feature.raster_value = data[0,0]
return feature
def open_shapefile(filename):
"""Take a file path string and return an ogr shape"""
# open the shapefile and get the layer
driver = ogr.GetDriverByName('ESRI Shapefile')
shp = driver.Open(filename)
if shp is None:
print 'Could not open %s' % filename
sys.exit(1)
return shp
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification task given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
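# Behaviour sketch for the private helper above (inferred from the code, not
# from separate documentation):
#
#     _check_targets([0, 1, 1, 0], [0, 1, 0, 0])
#     # -> ('binary', array([0, 1, 1, 0]), array([0, 1, 0, 0]))
#     _check_targets([[0, 1], [1, 1]], [[0, 1], [1, 0]])
#     # -> ('multilabel-indicator', <CSR matrix>, <CSR matrix>)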
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
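# Short usage sketch for the function above (the label sequences are arbitrary
# illustrative values):
#
#     y_rater1 = [0, 1, 1, 0, 2]
#     y_rater2 = [0, 1, 1, 1, 2]
#     cohen_kappa_score(y_rater1, y_rater2)
#     # -> 0.6875 (observed agreement 0.8, expected agreement 0.36)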
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 and inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta: float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
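# Behaviour sketch for the helper above: a zero denominator sets that element
# to 0.0 and emits an UndefinedMetricWarning naming the affected labels.
#
#     num = np.array([2., 0.])
#     den = np.array([4., 0.])
#     _prf_divide(num, den, 'precision', 'predicted', 'macro', ('precision',))
#     # -> array([0.5, 0. ])  (with a warning about labels with no predicted samples)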
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
        ``average != 'binary'``, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
       <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
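        # Labels present in the data but not requested are appended so that
        # the label encoding and counts below cover everything seen in
        # y_true/y_pred; only labels[:n_labels] are kept in the end.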
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
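        # A nonzero entry of true_and_pred marks a true positive in that
        # (sample, label) cell; counting nonzeros along sum_axis yields the
        # per-label (or per-sample, for average='samples') tp counts.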
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
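    # weights=None gives an unweighted mean, which covers 'macro' averaging
    # as well as 'micro' and 'binary', whose score arrays hold a single
    # element at this point.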
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
        ``average != 'binary'``, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
        ``average != 'binary'``, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
    loss : float or int
        Return the average Hamming loss between the elements of ``y_true``
        and ``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. The Hamming loss is more forgiving in that it penalizes only the
    individual labels.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
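        # Fraction of differing label assignments over all
        # n_samples * n_labels cells.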
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
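    # For binary problems LabelBinarizer yields a single indicator column;
    # expand it to one column per class so it aligns with y_pred's columns.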
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
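    # Clipping (and unnormalized inputs) can leave rows that do not sum to
    # exactly one, so each row is rescaled before taking logarithms.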
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
    In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels
    are included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
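        # Crammer-Singer margin: decision value of the true class minus the
        # largest decision value among the remaining classes, computed per
        # sample via the boolean mask below.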
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
        Label of the positive class. If None, the maximum label is used as
        the positive class.
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
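    # y_true is now a {0, 1} indicator of the positive class, so the Brier
    # score is simply the (weighted) mean squared error against y_prob.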
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
| bsd-3-clause |