repo_name (string, 6-92 chars) | path (string, 4-191 chars) | copies (322 classes) | size (string, 4-6 chars) | content (string, 821-753k chars) | license (15 classes) |
---|---|---|---|---|---|
MotokiShiga/stem-nmf | python/libnmf.py | 1 | 25657 | """ NMF (Nonnegative Matrix Factorization) for Spectrum Imaging Data Analysis
"""
# Author: Motoki Shiga <[email protected]>
# License: MIT
#
# Reference
# [1] Motoki Shiga, Kazuyoshi Tatsumi, Shunsuke Muto, Koji Tsuda,
# Yuta Yamamoto, Toshiyuki Mori, Takayoshi Tanji,
# "Sparse Modeling of EELS and EDX Spectral Imaging Data by Nonnegative Matrix Factorization",
# Ultramicroscopy, Vol.170, p.43-59, 2016.
#
import numpy as np
from numpy import random
import numpy.linalg as lin
from scipy.special import gammaln
import matplotlib.pyplot as plt
class NMF(object):
"""Non-Negative Matrix Factorization (NMF)
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
reps : integer, default: 3
The number of random initializations.
max_itr : integer, default: 100
Maximum number of iterations to compute.
random_seed : integer, default: 0
Random number generator seed control.
Attributes
----------
C_ : array, [#spatial data points, n_components]
Non-negative components decomposed from data X.
S_ : array, [#channels, n_components]
Non-negative spectra decomposed from data X.
obj_fun_ : array, [#iterations]
Learning curve of reconstruction error (Mean Squared Error)
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> model = NMF(n_components=2)
>>> model.fit(X)
Training NMF model....
1th iteration of NMF algorithm
2th iteration of NMF algorithm
3th iteration of NMF algorithm
NMF(n_components=2, reps=3, max_itr=100, random_seed=0)
>>> model.C_
array([[ 0. , 0.40549951],
[ 0.13374645, 0.40555886],
[ 0.24076597, 0.48667235],
[ 0.40131387, 0.4055646 ],
[ 0.56186177, 0.32445684],
[ 0.66888128, 0.40557034]])
>>> model.S_
array([[ 7.47464589, 2.46643616],
[ 0. , 2.4657656 ]])
References
----------
[1] Cichocki, Andrzej, and P.H.A.N. Anh-Huy.
“Fast local algorithms for large scale nonnegative matrix and tensor factorizations.”
IEICE transactions on fundamentals of electronics, communications and computer sciences 92.3: 708-721, 2009.
"""
# constructor
def __init__(self, n_components, reps=3, max_itr=100, random_seed=0, flag_nonneg=True):
self.n_components = n_components
self.reps = reps
self.max_itr = max_itr
self.random_seed = random_seed
self.flag_nonneg = flag_nonneg
def __repr__(self):
class_name = self.__class__.__name__
txt = 'n_components=' + str(self.n_components) \
+ ', reps=' + str(self.reps) + ', max_itr=' + str(self.max_itr) + \
', random_seed=' + str(self.random_seed)
return '%s(%s)' % (class_name, txt,)
def __str__(self):
txt = self.__repr__()
return txt
def fit(self, X, num_xy=list(), channel_vals=list(), unit_name='Channel'):
"""
Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
num_xy: {array}, (#samples in x axis, #samples in y axis)
or int, (total #spatial data points)
The number of spatial data points
channel_vals: {array}
The sequence of channel numbers, or unit values
unit_name: strings
The name of x axis of spectra
Returns
-------
self
"""
num_X_xy, num_X_ch = X.shape
if type(num_xy)==int:
self.num_xy = [num_xy]
elif len(num_xy)==0:
self.num_xy = num_X_xy
else:
self.num_xy = num_xy
if len(channel_vals)>0:
self.channel_vals = channel_vals
else:
self.channel_vals = np.arange(num_X_ch)
self.unit_name = unit_name
obj_best = np.array([np.inf])
random.seed(self.random_seed) # set the random seed
print('Training NMF model....')
for rep in range(self.reps):
print(str(rep + 1) + 'th iteration of NMF algorithm')
# initialization
obj = np.zeros(self.max_itr)
C = np.ones((num_X_xy, self.n_components))
for j in range(self.n_components):
C[:, j] = C[:, j] / (np.sqrt(C[:, j].T @ C[:, j]) + 1e-16)
cj = np.sum(C, axis=1)
i = np.random.choice(num_X_xy, self.n_components)
S = X[i, :].T
# main loop
for itr in range(self.max_itr):
# update S
XC = X.T @ C
C2 = C.T @ C
for j in range(self.n_components):
S[:, j] = XC[:, j] - S @ C2[:, j] + C2[j, j] * S[:, j]
if self.flag_nonneg:
S[:, j] = (S[:, j] + np.abs(S[:, j])) / 2 # replace negative values with zeros
# update C
XS = X @ S
S2 = S.T @ S
for j in range(self.n_components):
cj = cj - C[:, j]
C[:, j] = XS[:, j] - C @ S2[:, j] + S2[j, j] * C[:, j]
C[:, j] = (C[:, j] + np.abs(C[:, j])) / 2 # replace negative values with zeros
C[:, j] = C[:, j] / (np.sqrt(C[:, j].T @ C[:, j])) # normalize
cj = cj + C[:, j]
# cost function
X_est = C @ S.T # reconstructed data matrix
obj[itr] = lin.norm(X - X_est, ord='fro')**2 / X.size
# check of convergence
if (itr > 1) & (np.abs(obj[itr - 1] - obj[itr]) < 10 ** (-10)):
obj = obj[0:itr]
print('# updates: ' + str(itr))
break
# choose the best result
if obj_best[-1] > obj[-1]:
obj_best = obj.copy()
C_best = C.copy()
S_best = S.copy()
self.C_, self.S_, self.obj_fun_ = C_best, S_best, obj_best
return self
def imshow_component(self, figsize=list()):
'''
Plot spatial distributions of components
Parameters
----------
figsize: the vertical and horizontal size of the figure
'''
if (type(self.num_xy) != int) and (len(self.num_xy) == 2):
if len(figsize) == 0:
plt.figure()
else:
plt.figure(figsize=figsize)
for k in range(self.C_.shape[1]):
plt.subplot(100 + self.C_.shape[1] * 10 + k + 1)
im = self.C_[:, k].reshape(self.num_xy)
plt.imshow(im)
plt.title('Component: ' + str(k + 1))
plt.tight_layout()
plt.show()
else:
self.plot_component(figsize)
def plot_component(self, figsize=list()):
'''
Plot component intensities (data points vs intensities)
Parameters
----------
figsize: the vertical and horizontal size of the figure
'''
if len(figsize) == 0:
plt.figure()
else:
plt.figure(figsize=figsize)
for k in range(self.C_.shape[1]):
plt.plot(self.C_[:, k], label=str(k + 1))
plt.xlim([0, self.C_.shape[0]])
plt.xlabel('Spatial data point')
plt.ylabel('Intensity')
plt.title('Components')
plt.legend()
plt.show()
def plot_spectra(self, figsize=list(), normalize=True):
'''
Plot spectra
Parameters
----------
figsize: the vertical and horizontal size of the figure
normalize: Normalize each spectrum or NOT
'''
if len(figsize) == 0:
plt.figure()
else:
plt.figure(figsize=figsize)
for k in range(self.S_.shape[1]):
if normalize:
self.S_[:, k] = self.S_[:, k] / (np.sqrt(self.S_[:, k].T @ self.S_[:, k]) + 1e-16)
plt.plot(self.channel_vals, self.S_[:, k], label=str(k + 1))
plt.xlabel(self.unit_name)
plt.ylabel('Intensity')
plt.xlim([self.channel_vals[0], self.channel_vals[-1]])
plt.title('Spectra')
plt.legend()
plt.show()
def plot_object_fun(self, figsize=list()):
'''
Plot learning curve (#iterations vs object function (error function))
Parameters
----------
figsize: the vertical and horizontal size of the figure
'''
if len(figsize) == 0:
plt.figure()
else:
plt.figure(figsize=figsize)
plt.plot(self.obj_fun_)
plt.xlabel('Iterations')
plt.xlim([0, len(self.obj_fun_)])
plt.title('Object function')
plt.show()
class NMF_SO(NMF):
"""Non-Negative Matrix Factorization with Soft orthogonality penalty (NMF-SO)
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
wo : float
Weight of the soft orthogonality penalty.
The value should be between 0 and 1.
reps : integer, default: 3
The number of random initializations.
max_itr : integer, default: 100
Maximum number of iterations to compute.
random_seed : integer, default: 0
Random number generator seed control.
Attributes
----------
C_ : array, [#spatial data points, n_components]
Non-negative components decomposed from data X.
S_ : array, [#channels, n_components]
Non-negative spectra decomposed from data X.
obj_fun_ : array, [#iterations]
Learning curve of reconstruction error (Mean Squared Error)
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> model = NMF_SO(n_components=2, wo = 0.1)
>>> model.fit(X)
Training NMF with Soft Orthogonal constraint....
1th iteration of NMF-SO algorithm
2th iteration of NMF-SO algorithm
3th iteration of NMF-SO algorithm
NMF_SO(n_components=2, wo=0.1, reps=3, max_itr=100, random_seed=0)
>>> model.C_
array([[ 0. , 0.30547946],
[ 0. , 0.51238139],
[ 0. , 0.73899883],
[ 0.33013316, 0.31309478],
[ 0.60391616, 0. ],
[ 0.72546355, 0. ]])
>>> model.S_
array([[ 8.28515563, 3.94337313],
[ 1.34447182, 1.87880282]])
References
----------
Motoki Shiga, Kazuyoshi Tatsumi, Shunsuke Muto, Koji Tsuda,
Yuta Yamamoto, Toshiyuki Mori, Takayoshi Tanji,
"Sparse Modeling of EELS and EDX Spectral Imaging Data by Nonnegative Matrix Factorization",
Ultramicroscopy, Vol.170, p.43-59, 2016.
doi: 10.1016/j.ultramic.2016.08.006
"""
# constructor
def __init__(self, n_components, wo=0.1, reps=3, max_itr=100, random_seed=0, flag_nonneg=True):
self.n_components = n_components
self.wo = wo
self.reps = reps
self.max_itr = max_itr
self.random_seed = random_seed
self.flag_nonneg = flag_nonneg
def __repr__(self):
class_name = self.__class__.__name__
txt = 'n_components=' + str(self.n_components) + ', wo=' + str(self.wo) \
+ ', reps=' + str(self.reps) + ', max_itr=' + str(self.max_itr) + \
', random_seed=' + str(self.random_seed)
return '%s(%s)' % (class_name, txt,)
def __str__(self):
txt = self.__repr__()
return txt
def fit(self, X, num_xy=list(), channel_vals=list(), unit_name='Channel'):
"""
Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
num_xy: {array}, (#samples in x axis, #samples in y axis)
or int, (total #spatial data points)
The number of spatial data points
channel_vals: {array}
The sequence of channel numbers, or unit values
unit_name: strings
The name of x axis of spectra
Returns
-------
self
"""
num_X_xy, num_X_ch = X.shape
if type(num_xy)==int:
self.num_xy = [num_xy]
elif len(num_xy)==0:
self.num_xy = num_X_xy
else:
self.num_xy = num_xy
if len(channel_vals)>0:
self.channel_vals = channel_vals
else:
self.channel_vals = np.arange(num_X_ch)
self.unit_name = unit_name
obj_best = np.array([np.inf])
random.seed(self.random_seed) # set the random seed
print('Training NMF with Soft Orthogonal constraint....')
for rep in range(self.reps):
print(str(rep + 1) + 'th iteration of NMF-SO algorithm')
# initialization
obj = np.zeros(self.max_itr)
C = np.ones((num_X_xy, self.n_components))
for j in range(self.n_components):
C[:, j] = C[:, j] / (np.sqrt(C[:, j].T @ C[:, j]) + 1e-16)
cj = np.sum(C, axis=1)
i = np.random.choice(num_X_xy, self.n_components)
S = X[i, :].T
# main loop
for itr in range(self.max_itr):
# update S
XC = X.T @ C
C2 = C.T @ C
for j in range(self.n_components):
S[:, j] = XC[:, j] - S @ C2[:, j] + C2[j, j] * S[:, j]
if self.flag_nonneg:
S[:, j] = (S[:, j] + np.abs(S[:, j])) / 2 # replace negative values with zeros
# update C
XS = X @ S
S2 = S.T @ S
for j in range(self.n_components):
cj = cj - C[:, j]
C[:, j] = XS[:, j] - C @ S2[:, j] + S2[j, j] * C[:, j]
C[:, j] = C[:, j] - self.wo * (cj.T @ C[:, j]) / (cj.T @ cj) * cj
C[:, j] = (C[:, j] + np.abs(C[:, j])) / 2 # replace negative values with zeros
C[:, j] = C[:, j] / (np.sqrt(C[:, j].T @ C[:, j])) # normalize
cj = cj + C[:, j]
# cost function
X_est = C @ S.T # reconstructed data matrix
obj[itr] = lin.norm(X - X_est, ord='fro')**2 / X.size
# check of convergence
if (itr > 1) & (np.abs(obj[itr - 1] - obj[itr]) < 10 ** (-10)):
obj = obj[0:itr]
print('# updates: ' + str(itr))
break
# choose the best result
if obj_best[-1] > obj[-1]:
obj_best = obj.copy()
C_best = C.copy()
S_best = S.copy()
self.C_, self.S_, self.obj_fun_ = C_best, S_best, obj_best
return self
class NMF_ARD_SO(NMF_SO):
"""Non-Negative Matrix Factorization with Soft orthogonality penalty (NMF-SO)
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
wo : float
The weight of the soft orthogonality penalty.
The value should be between 0 and 1.
reps : integer, default: 3
The number of random initializations.
max_itr : integer, default: 100
Maximum number of iterations to compute.
alpha : float (greater than 1)
Hyperparameter of the ARD prior that adjusts sparseness.
threshold_merge : float
Similarity threshold between component spectra above which components are merged.
random_seed : integer, default: 0
Random number generator seed control.
Attributes
----------
C_ : array, [#spatial data points, n_components]
Non-negative components decomposed from data X.
S_ : array, [#channels, n_components]
Non-negative spectra decomposed from data X.
obj_fun_ : array, [#iterations]
Learning curve of reconstruction error (Mean Squared Error)
beta_ : real value
Sparse penalty parameter (computed from alpha and data X)
lambdas_ : array, [#iterations, n_components]
Learning curve of component intensities
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> model = NMF_ARD_SO(n_components=2, wo = 0.1)
>>> model.fit(X)
Training NMF with ARD and Soft Orthogonal constraint....
1th iteration of NMF-ARD-SO algorithm
2th iteration of NMF-ARD-SO algorithm
3th iteration of NMF-ARD-SO algorithm
NMF_ARD_SO(n_components=2, wo=0.1, reps=3, max_itr=100, alpha=1.000000000000001, threshold_merge=0.99, random_seed=0)
>>> model.C_
array([[ 0. , 1.31254938],
[ 0. , 2.21337851],
[ 0.04655829, 3.15615036],
[ 2.88446237, 1.23380528],
[ 5.05090679, 0. ],
[ 6.07007114, 0. ]])
>>> model.S_
array([[ 0.9869102 , 0.90082913],
[ 0.16127074, 0.43417379]])
References
----------
Motoki Shiga, Kazuyoshi Tatsumi, Shunsuke Muto, Koji Tsuda,
Yuta Yamamoto, Toshiyuki Mori, Takayoshi Tanji,
"Sparse Modeling of EELS and EDX Spectral Imaging Data by Nonnegative Matrix Factorization",
Ultramicroscopy, Vol.170, p.43-59, 2016.
doi: 10.1016/j.ultramic.2016.08.006
"""
# constructor
def __init__(self, n_components, wo=0.1, reps=3, max_itr=100,
alpha=1+10**(-15), threshold_merge=0.99, random_seed=0, flag_nonneg=True):
super(NMF_ARD_SO, self).__init__(n_components, wo, reps, max_itr, random_seed)
self.alpha = alpha
self.threshold_merge = threshold_merge
self.flag_nonneg = flag_nonneg
def __repr__(self):
class_name = self.__class__.__name__
txt = 'n_components=' + str(self.n_components) + ', wo=' + str(self.wo) \
+ ', reps=' + str(self.reps) + ', max_itr=' + str(self.max_itr) + \
', alpha=' + str(self.alpha) + ', threshold_merge=' + str(self.threshold_merge) + ', random_seed=' + str(self.random_seed)
return '%s(%s)' % (class_name, txt,)
def __str__(self):
txt = self.__repr__()
return txt
def fit(self, X, num_xy=list(), channel_vals=list(), unit_name='Channel'):
"""
Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
num_xy: {array}, (#samples in x axis, #samples in y axis)
or int, (total #spatial data points)
The number of spatial data points
channel_vals: {array}
The sequence of channel numbers, or unit values
unit_name: strings
The name of x axis of spectra
Returns
-------
self
"""
eps = np.finfo(np.float64).eps # tiny value
num_X_xy, num_X_ch = X.shape # the number of data points and the number of channels
# --- Attribute initialization from a data matrix------
if type(num_xy) == int:
self.num_xy = [num_xy]
elif len(num_xy) == 0:
self.num_xy = num_X_xy
else:
self.num_xy = num_xy # (xdim, ydim)
if len(channel_vals) > 0:
self.channel_vals = channel_vals
else:
self.channel_vals = np.arange(num_X_ch)
self.unit_name = unit_name
# -----------------------------------------------------
mu_x = np.mean(X)
self.beta_ = mu_x * (self.alpha - 1) * np.sqrt(num_X_ch) / self.n_components
const = self.n_components * (gammaln(self.alpha) - self.alpha * np.log(self.beta_))
random.seed(self.random_seed) # set the random seed
obj_best = np.array([np.inf]) # to deposit the best object value
print('Training NMF with ARD and Soft Orthogonal constraint....')
for rep in range(self.reps):
print(str(rep+1) + 'th iteration of NMF-ARD-SO algorithm')
# --- Initialization ------
C = (np.random.rand(num_X_xy, self.n_components) + 1) * (np.sqrt(mu_x / self.n_components))
L = (np.sum(C, axis=0) + self.beta_) / (num_X_ch + self.alpha + 1)
cj = np.sum(C, axis=1)
i = np.random.choice(num_X_xy, self.n_components)
S = X[i, :].T
for j in range(self.n_components):
c = (np.sqrt(S[:, j].T @ S[:, j])) # normalize
if c > 0:
S[:, j] = S[:, j] / c
else:
S[:, j] = 1 / np.sqrt(num_X_ch)
X_est = C @ S.T # reconstructed data matrix
sigma2 = np.mean((X - X_est) ** 2)
obj = np.zeros(self.max_itr)
lambdas = np.zeros((self.max_itr, self.n_components))
# -------------------------
for itr in range(self.max_itr):
# update S (spectra)
XC = X.T @ C
C2 = C.T @ C
for j in range(self.n_components):
S[:, j] = XC[:, j] - S @ C2[:, j] + C2[j, j] * S[:, j]
if self.flag_nonneg:
S[:, j] = (S[:, j] + np.abs(S[:, j])) / 2 # replace negative values with zeros
c = (np.sqrt(S[:, j].T @ S[:, j])) # normalize
if c > 0:
S[:, j] = S[:, j] / c
else:
S[:, j] = 1 / np.sqrt(num_X_ch)
# update C (component intensities)
XS = X @ S
S2 = S.T @ S
for j in range(self.n_components):
cj = cj - C[:, j]
C[:, j] = XS[:, j] - C @ S2[:, j] + S2[j, j] * C[:, j]
C[:, j] = C[:, j] - sigma2 / L[j]
if (self.wo > 0):
C[:, j] = C[:, j] - self.wo * (cj.T @ C[:, j]) / (cj.T @ cj) * cj
C[:, j] = (C[:, j] + np.abs(C[:, j])) / 2 # replace negative values with zeros
cj = cj + C[:, j]
# merge components if their spectra are almost same
if itr > 3:
SS = S.T @ S
i, j = np.where(SS >= self.threshold_merge)
m = i < j
i, j = i[m], j[m]
for n in range(len(i)):
S[:, j[n]] = 1 / np.sqrt(num_X_ch)
C[:, i[n]] = np.sum(C[:, np.r_[i[n], j[n]]], axis=1)
C[:, j[n]] = 0
if np.sum(cj) < eps:
C[:, :] = eps
# update lambda(ARD parameters)
L = (np.sum(C, axis=0) + self.beta_) / (num_X_xy + self.alpha + 1) + eps
lambdas[itr, :] = L.copy()
# update sigma2 (the variance of additive Gaussian noise)
X_est = C @ S.T # reconstructed data matrix
sigma2 = np.mean((X - X_est) ** 2)
# object function (negative log likelihood)
obj[itr] = num_X_xy * num_X_ch / 2 * np.log(2 * np.pi * sigma2) + num_X_xy * num_X_ch / 2 # Gaussian likelihood term
obj[itr] = obj[itr] + (L ** (-1)).T @ (np.sum(C, axis=0) + self.beta_).T \
+ (num_X_xy + self.alpha + 1) * np.sum(np.log(L), axis=0) + const
# check of convergence
if (itr > 1) & (np.abs(obj[itr - 1] - obj[itr]) < 10 ** (-10)):
obj = obj[0:itr]
lambdas = lambdas[0:itr, :].copy()
break
# choose the best result
if obj_best[-1] > obj[-1]:
obj_best = obj.copy()
C_best = C.copy()
S_best = S.copy()
lambdas_best = lambdas.copy()
# for learning curve of object function
self.obj_fun_ = obj_best
# replace tiny values with zeros
C_best[C_best < eps] = 0
S_best[S_best < eps] = 0
L_best = (np.sum(C_best, axis=0) + self.beta_) / (num_X_xy + self.alpha + 1)
k = np.argsort(-L_best)
num_comp_best = np.sum(L_best[k] > eps)
ks = k[:num_comp_best]
self.C_, self.S_, self.L_ = C_best[:, ks], S_best[:, ks], L_best[ks]
self.lambdas_ = lambdas_best[:, k] # leave all values to draw learning curve of ARD
X_est = self.C_ @ self.S_.T # reconstructed data matrix
self.sigma2_ = np.mean((X - X_est) ** 2)
return self
def plot_ard(self, figsize=list()):
'''
Plot learning curve of component intensities (#iterations vs intensities)
Parameters
----------
figsize: the vertical and horizontal size of the figure
'''
if len(figsize) == 0:
plt.figure()
else:
plt.figure(figsize=figsize)
for k in range(self.n_components):
plt.plot(self.lambdas_[:, k], label=str(k + 1))
plt.xlabel('Iterations')
plt.ylabel('Intensity')
plt.xlim([0, self.lambdas_.shape[0]])
plt.title('Intensity of components')
plt.legend()
plt.show()
| mit |
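A minimal usage sketch for the classes above on synthetic spectrum-imaging-like data (the grid size, channel count, and hyperparameters are illustrative assumptions, not values from the reference paper; it assumes `libnmf.py` is importable):

```python
# Hypothetical usage sketch for libnmf.py (synthetic data; sizes are assumptions).
import numpy as np
from libnmf import NMF_ARD_SO   # assumes libnmf.py is on the Python path

rng = np.random.RandomState(0)
nx, ny, n_ch, k_true = 16, 16, 64, 3                     # assumed image grid, channels, true components
C_true = rng.rand(nx * ny, k_true)                       # spatial intensities
S_true = rng.rand(n_ch, k_true)                          # spectra
X = C_true @ S_true.T + 0.01 * rng.rand(nx * ny, n_ch)   # noisy data matrix

# Over-specify n_components; the ARD prior is meant to prune unused ones.
model = NMF_ARD_SO(n_components=5, wo=0.05, reps=3, max_itr=200)
model.fit(X, num_xy=(nx, ny), channel_vals=np.arange(n_ch), unit_name='Channel')

print('components kept:', model.C_.shape[1])
model.imshow_component(figsize=(8, 3))                   # spatial maps
model.plot_spectra(figsize=(4, 3))                       # resolved spectra
```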
turbulencia/tlab | doc/figures.py | 2 | 18697 | import numpy as np
import scipy.linalg
from scipy import special
import matplotlib.pyplot as plt
import fdm
from matplotlib import rc
rc('text', usetex=True)
rc('font', family='serif', size=11)
# rc('axes', titlesize='medium')
rc('legend',fontsize='small')
rc('axes', grid=True)
rc('grid', color='0.9')
rc('lines', solid_capstyle='round')
# rc('figure', dpi=100)
rc('savefig', dpi=100)
FiguresToPlot = []
FiguresToPlot +=['profiles']
# FiguresToPlot +=['spectra']
# FiguresToPlot +=['wavenumber']
# FiguresToPlot +=['stability']
# FiguresToPlot +=['convergence']
###############################################################################
tag='profiles'
if tag in FiguresToPlot:
fig_id = 0
n = 100
x = np.linspace(-8.,8., num=n)
fig_id = fig_id +1
plt.figure( figsize = (4,3) )
plt.plot( x, 0.5 *np.tanh(-0.5 *x), label=r'tanh')
plt.plot( x, 0.5 *special.erf(-0.5 *x), label=r'erf')
plt.plot( x, -x, label=r'linear')
plt.plot( x, np.exp(-0.5 *x**2.), label=r'Gaussian')
plt.plot( x, 1./np.cosh(-0.5 *x) **2., label=r'Bickley')
plt.plot( x, 1 -(0.5 *x) **2., label=r'parabolic')
plt.xticks( [-8 +2*i for i in range(9)] )
plt.yticks( [-0.5 +0.5*i for i in range(5)] )
plt.xlabel(r'spatial variable $\xi$')
plt.ylabel(r'function $g(\xi)$')
plt.legend( )
plt.axis([-8., 8., -1., 1.])
plt.tight_layout(pad=0.1)
plt.savefig("{}.pdf".format(tag+str(fig_id)))
plt.show()
###############################################################################
tag='spectra'
if tag in FiguresToPlot:
fig_id = 0
n = 100
f = np.linspace(0, 5., num=n)
fig_id = fig_id +1
plt.figure( figsize = (4,3) )
plt.plot( f, np.ones(n), label=r'uniform')
plt.plot( f, f**2. *np.exp(-2. *(f-1.) ), label=r'quadratic')
plt.plot( f, f**4. *np.exp(-2. *(f **2. -1) ), label=r'quartic')
plt.plot( f, np.exp(-0.5 *(f-1.) **2. *6 **2.), label=r'Gaussian')
plt.xlabel(r'normalized frequency $f/f_0$')
plt.ylabel(r'normalized spectra $E/E_0$')
plt.legend( )
plt.tight_layout(pad=0.1)
plt.savefig("{}.pdf".format(tag+str(fig_id)))
plt.show()
###############################################################################
tag='wavenumber'
if tag in FiguresToPlot:
fig_id = 0
# Modified wavenumber
w = np.linspace( 0., np.pi, num = 100)
# Modified wavenumber of first order derivative
fig_id = fig_id +1
plt.figure( figsize = (4,3) )
plt.plot( w, w, label=r'exact', c='k', lw=1.0 )
plt.plot( w, np.sin(w), label=r'$\delta_x$ C2', c='C0', alpha=0.25 )
wm = fdm.fdm1_c6_wavenumber(w)
plt.plot( w, wm, label=r'$\delta_x$ C6', c='C0' )
j = wm.argmax()
plt.text( w[j]-1., wm[j], r'${:3.2f}\,@\,{:3.2f}\pi$'.format(wm[j],w[j]/np.pi), va='bottom' )
plt.plot( (w[j],w[j]-1), (wm[j],wm[j]), 'k', ls='-', lw=0.5, marker='o', mfc='w', markevery=2, ms=4 )
plt.xticks( np.linspace(0., np.pi, num = 7), [r'$0.0$', r'', r'$\pi/3$', r'', r'$2\pi/3$', r'', r'$\pi$'])
plt.yticks( np.linspace(0., np.pi, num = 7), [r'$0.0$', r'', r'$\pi/3$', r'', r'$2\pi/3$', r'', r'$\pi$'], rotation='vertical')
plt.xlabel(r'$\omega=\kappa h = 2\pi/\mathrm{PPW}$')
plt.ylabel(r'Im($\lambda_1)$')
plt.legend( )
plt.tight_layout(pad=0.1)
plt.savefig("{}.pdf".format(tag+str(fig_id)))
# Relative
fig_id = fig_id +1
plt.figure( figsize = (4,3) )
plt.plot( w, np.sin(w) /w, label=r'$\delta_x$ C2', c='C0', alpha=0.25 )
plt.plot( w, fdm.fdm1_c6_wavenumber(w) /w, label=r'$\delta_x$ C6', c='C0' )
plt.xticks( np.linspace(0., np.pi, num = 7), [r'$0.0$', r'', r'$\pi/3$', r'', r'$2\pi/3$', r'', r'$\pi$'])
plt.xlabel(r'$\omega=\kappa h = 2\pi/\mathrm{PPW}$')
plt.ylabel(r'$\mathrm{Im}(\lambda_1) /[\mathrm{Im}(\lambda_1)]_\mathrm{e}$')
plt.legend( )
plt.tight_layout(pad=0.1)
plt.savefig("{}.pdf".format(tag+str(fig_id)))
# Modified wavenumber of second order derivative
fig_id = fig_id +1
plt.figure( figsize = (4,3) )
plt.plot( w, w **2., label=r'exact', c='k', lw=1.0 )
plt.plot( w, fdm.fdm1_c6_wavenumber(w) **2., label=r'$(\delta_x$ C6)$^2$', c='C0' )
plt.plot( w, 2. *( 1 -np.cos(w) ), label=r'$\delta_{xx}$ C2', c='C1', alpha=0.25 )
wm = fdm.fdm2_c6_wavenumber(w, 48./7.)
plt.plot( w, wm, label=r'$\delta_{xx}$ C6', c='C1', alpha=0.5 )
j = -1
plt.text( w[j]-1., wm[j], r'${:3.2f}\,@\,\pi$'.format(wm[j]), va='bottom' )
plt.plot( (w[j],w[j]-1), (wm[j],wm[j]), 'k', ls='-', lw=0.5, marker='o', mfc='w', markevery=2, ms=4 )
plt.plot( w, fdm.fdm2_c6_wavenumber(w, np.pi **2.), label=r'$\delta_{xx}$ C6b', c='C1' )
plt.xticks( np.linspace(0., np.pi, num = 7), [r'$0.0$', r'', r'$\pi/3$', r'', r'$2\pi/3$', r'', r'$\pi$'])
plt.yticks( np.linspace(0., np.pi **2., num = 7), [r'$0.0$', r'', r'$\pi^2/3$', r'', r'$2\pi^2/3$', r'', r'$\pi^2$'], rotation='vertical')
plt.xlabel(r'$\omega=\kappa h = 2\pi/\mathrm{PPW}$')
plt.ylabel(r'-Re($\lambda_2)$')
plt.legend( )
plt.tight_layout(pad=0.1)
plt.savefig("{}.pdf".format(tag+str(fig_id)))
# Relative
fig_id = fig_id +1
plt.figure( figsize = (4,3) )
plt.plot( w, fdm.fdm1_c6_wavenumber(w) **2. /w **2., label=r'$(\delta_x$ C6)$^2$', c='C0' )
plt.plot( w, 2. *( 1 -np.cos(w) ) /w **2., label=r'$\delta_{xx}$ C2', c='C1', alpha=0.25 )
plt.plot( w, fdm.fdm2_c6_wavenumber(w, 48./7.) /w **2., label=r'$\delta_{xx}$ C6', c='C1', alpha=0.5 )
plt.plot( w, fdm.fdm2_c6_wavenumber(w, np.pi **2.) /w **2., label=r'$\delta_{xx}$ C6b', c='C1' )
plt.xticks( np.linspace(0., np.pi, num = 7), [r'$0.0$', r'', r'$\pi/3$', r'', r'$2\pi/3$', r'', r'$\pi$'])
# plt.yticks( np.linspace(0., np.pi **2., num = 7), [r'$0.0$', r'', r'$\pi^2/3$', r'', r'$2\pi^2/3$', r'', r'$\pi^2$'], rotation='vertical')
plt.xlabel(r'$\omega=\kappa h = 2\pi/\mathrm{PPW}$')
plt.ylabel(r'$\mathrm{Re}(\lambda_2)/[\mathrm{Re}(\lambda_2)]_\mathrm{e}$')
plt.legend( )
plt.tight_layout(pad=0.1)
plt.savefig("{}.pdf".format(tag+str(fig_id)))
plt.show()
###############################################################################
tag='stability'
if tag in FiguresToPlot:
fig_id = 0
def PlotBackground(w,r,s,tag,wi,wr,cfl_a,cfl_d):
colors = [ '#507dbc', '#86A7D3', '#bbd1ea', '#f9b5ac', '#F49690', '#ee7674' ]
# plt.contourf(np.real(w),np.imag(w),r,[0., 1.],colors=['#aedcc0'],alpha=0.5)
plt.contourf(np.real(w),np.imag(w),s,[-0.1,-0.01,0.,0.01,0.1],colors=colors,alpha=0.75,extend='both')
# plt.contour( np.real(w),np.imag(w),abs(s_masked),[1.],linewidths=[1.0],colors='w')
plt.colorbar(orientation='horizontal',shrink=0.6, label=tag, pad=0.05)
plt.contour( np.real(w),np.imag(w),r, [1.],colors=['k'],linewidths=[1.0])
plt.xlabel(r'Re($\lambda\tau)$',loc='right',labelpad=-2)
plt.ylabel(r'Im($\lambda\tau)$',loc='bottom',labelpad=-22)
plt.gca().set_aspect('equal')#,'box')
plt.gca().spines['left'].set_position(('data', 0))
plt.gca().spines['bottom'].set_position(('data', 0))
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.grid()
plt.gca().xaxis.set_ticklabels([])
plt.gca().yaxis.set_ticklabels([])
# wi, wr = 3.34, -4.65
plt.text( -1.5, wi, r'${:3.2f}$'.format(wi), va='bottom' )
plt.plot( (0.,-1.5), (wi,wi), 'k', ls='-', lw=0.5, marker='o', mfc='w', markevery=2, ms=4 )
plt.text( wr, -1.5, r'${:3.2f}$'.format(wr), ha='left' )
plt.plot( (wr,wr), (0,-1.5), 'k', ls='-', lw=0.5, marker='o', mfc='w', markevery=2, ms=4 )
wi, wr = cfl_a *1.99,-cfl_d *np.pi **2. #6.86
plt.plot( wr *np.array([0., 1., 1., 0., 0.]), wi *np.array([1., 1., -1., -1., 1.]), 'k', lw='0.5')
plt.text( wr-1.5, wi, r'${}={:3.2f}$'.format('\mathrm{CFL}_\mathrm{a}',cfl_a), va='bottom' )
plt.plot( (wr-0.1,wr-1.5), (wi,wi), 'k', ls='-', lw=0.5 )
plt.text( wr, -wi-1., r'${}={:3.2f}$'.format('\mathrm{CFL}_\mathrm{d}',cfl_d), ha='right' )
plt.plot( (wr,wr), (-wi-0.1,-wi-1.), 'k', ls='-', lw=0.5 )
wi, wr = cfl_a *np.pi /2., -cfl_d *(np.pi /2.) **2.
plt.plot( wr *np.array([0., 1., 1., 0., 0.]), wi *np.array([1., 1., -1., -1., 1.]), 'k', lw='0.5')
plt.text( wr-1.5, -wi, r'$\mathrm{PPW}=4$', va='bottom' )
plt.plot( (wr-0.1,wr-1.5), (-wi,-wi), 'k', ls='-', lw=0.5 )
return
# Stability Regions
# x = np.linspace(-3.0,0.6,400)
# y = np.linspace(-2.75,2.75,400)
# x, y = np.meshgrid(x,y)
# w = x + 1j *y
# r = 1. + w + w **2. /2. + w **3. /6. # Runge-Kutta 3
# wi, wr = 1.73, -2.52
# cfl_a = 0.6
# cfl_d = 0.15
x = np.linspace(-5.0,1.0,400)
y = np.linspace(-4.0,4.0,400)
x, y = np.meshgrid(x,y)
w = x + 1j *y
r = 1. + w + w **2. /2. + w **3. /6. + w **4. /24. + w **5. /200. # Runge-Kutta 4-5
wi, wr = 3.34, -4.65
cfl_a = 1.2
cfl_d = 0.3
s = r/np.exp(w)
r = np.abs(r)
s_masked = np.ma.masked_where( r > 1., s )
# Eigenvalues
n = 128
# Periodic case
w1 = np.linspace( -np.pi, np.pi, num = n, endpoint=False)
lambdasS = w1 *cfl_a *1j -w1 **2. *cfl_d
lambdasC = fdm.fdm1_c6_wavenumber(w1) *cfl_a *1j -fdm.fdm2_c6_wavenumber(w1, np.pi **2.) *cfl_d
# Nonperiodic case, grid step from 1 to 1+h2 as sigmoid function. Set h2 = 0 for uniform grid
h2 = 0.0 #1.5
ndelta = 4.
s = np.linspace( 1., float(n), num=n)
dummy = np.exp( -(s -float(n)/2.) /ndelta )
xp1 = 1. + h2 /( 1. + dummy ) # First order derivative of x=x(s)
xp2 = ( h2 /ndelta ) *( 1. /( 1. + dummy ) ) **2. *dummy # Second-order derivative
D2 = np.diagflat( xp2 /xp1 **2 ) # Correction diagonal matrix
# Nonperiodic case, 1st order derivative
l1 = fdm.fdm1_c6_A(n) *xp1
A1 = np.diagflat(l1[0,1:],-1) +np.diagflat(l1[1,:],0) +np.diagflat(l1[2,:-1],1)
r1 = fdm.fdm1_c6_B(n)
B1 = np.diagflat(r1[0][2:],-2) +np.diagflat(r1[1][1:],-1) +np.diagflat(r1[2],0) \
+np.diagflat(r1[3][:-1],1) +np.diagflat(r1[4][:-2],2)
# Nonperiodic case, 2nd order derivative
l2 = fdm.fdm2_c6_A(n, np.pi **2.) *( xp1 **2. )
# l2 = fdm.fdm2_c6_A(n, 48./7.) *( xp1 **2. )
A2 = np.diagflat(l2[0,1:],-1) +np.diagflat(l2[1,:],0) +np.diagflat(l2[2,:-1],1)
r2 = fdm.fdm2_c6_B(n, np.pi **2.)
# r2 = fdm.fdm2_c6_B(n, 48./7.)
B2 = np.diagflat(r2[0][3:],-3) +np.diagflat(r2[1][2:],-2) +np.diagflat(r2[2][1:],-1) +np.diagflat(r2[3],0) \
+np.diagflat(r2[4][:-1],1) +np.diagflat(r2[5][:-2],2) +np.diagflat(r2[6][:-3],3)
# Reduced arrays for Dirichlet boundary conditions at j=0
A1 = A1[1:,1:]
A1[0,0] -= l1[0,1] *l1[2,0] /l1[1,0]
B1 = B1[1:,1:]
B1[0,0] -= l1[0,1] *r1[3,0] /l1[1,0]
B1[0,1] -= l1[0,1] *r1[4,0] /l1[1,0]
A2 = A2[1:,1:]
A2[0,0] -= l2[0,1] *l2[2,0] /l2[1,0]
B2 = B2[1:,1:]
B2[0,0] -= l2[0,1] *r2[4,0] /l2[1,0]
B2[0,1] -= l2[0,1] *r2[5,0] /l2[1,0]
B2[0,2] -= l2[0,1] *r2[6,0] /l2[1,0]
D2 = D2[1:,1:]
# Reduced arrays for Dirichlet boundary conditions at j=n
A1 = A1[:-1,:-1]
A1[-1,-1] -= l1[2,-2] *l1[0,-1] /l1[1,-1]
B1 = B1[:-1,:-1]
B1[-1,-1] -= l1[2,-2] *r1[1,-1] /l1[1,-1]
B1[-1,-2] -= l1[2,-2] *r1[0,-1] /l1[1,-1]
A2 = A2[:-1,:-1]
A2[-1,-1] -= l2[2,-2] *l2[0,-1] /l2[1,-1]
B2 = B2[:-1,:-1]
B2[-1,-1] -= l2[2,-2] *r2[2,-1] /l2[1,-1]
B2[-1,-2] -= l2[2,-2] *r2[1,-1] /l2[1,-1]
B2[-1,-3] -= l2[2,-2] *r2[0,-1] /l2[1,-1]
D2 = D2[:-1,:-1]
L = -( cfl_a +cfl_d *D2 )*scipy.linalg.solve(A1,B1) + cfl_d *scipy.linalg.solve(A2,B2)
lambdas = scipy.linalg.eigvals( L )
# Plot
fig_id = fig_id +1
fig, ((f1, f2)) = plt.subplots(nrows=1, ncols=2, figsize=(8,6))
plt.subplot(f1)
PlotBackground( w, r, abs(s_masked)-1., r'amplitude error $\rho-1$',wi,wr,cfl_a,cfl_d )
plt.plot(np.real(lambdasS),np.imag(lambdasS),marker='o',markersize=5.,markeredgewidth=0.,lw=0.,color='0.5')
plt.plot(np.real(lambdasC),np.imag(lambdasC),marker='o',markersize=5.,markeredgewidth=0.,lw=0.,color='#6a2202')
plt.plot(np.real(lambdas), np.imag(lambdas), marker='o',markersize=5.,markeredgewidth=0.,lw=0.,color='#bc7201')
plt.subplot(f2)
PlotBackground( w, r, np.angle(s_masked) /np.pi, r'phase error $\theta/\pi$',wi,wr,cfl_a,cfl_d )
plt.plot(np.real(lambdasS),np.imag(lambdasS),marker='o',markersize=5.,markeredgewidth=0.,lw=0.,color='0.5')
plt.plot(np.real(lambdasC),np.imag(lambdasC),marker='o',markersize=5.,markeredgewidth=0.,lw=0.,color='#6a2202')
plt.plot(np.real(lambdas), np.imag(lambdas), marker='o',markersize=5.,markeredgewidth=0.,lw=0.,color='#bc7201')
plt.tight_layout(pad=0.0)
plt.savefig("{}.pdf".format(tag+str(fig_id)),bbox_inches='tight')
plt.show()
###############################################################################
tag='convergence'
if tag in FiguresToPlot:
fig_id = 0
# Define functions
def exp_(x):
c = 1.
return ( # Parenthesis to write comments at end of line
'Exp', # Name
np.exp(c *x), # Function
np.exp(c *x) *c, # First-order derivative
np.exp(c *x) *c **2. # Second-order derivative
)
def sin_(x):
c = 1.
return 'Sin', \
np.sin(c *np.pi *x), \
np.cos(c *np.pi *x) *c *np.pi, \
-np.sin(c *np.pi *x) *(c *np.pi) **2.
def cos_(x):
c = 1.
return 'Cos', \
np.cos(c *np.pi *x), \
-np.sin(c *np.pi *x) *c *np.pi, \
-np.cos(c *np.pi *x) *(c *np.pi) **2.
def gauss_(x):
c = 40.
tmp = np.exp(-c *x *x)
return 'Gaussian', \
tmp, \
-tmp *2. *c *x, \
-tmp *2. *c *( 1. -2. *c *x *x )
# Error analysis of the second-order derivative
n = 10
x = np.linspace(-1.,1.,n)
h = (x[n-1]-x[0]) /(n-1)
# Define list of functions to be processed
fs = [ exp_(x) ]#, sin_(x), cos_(x), gauss_(x)]
# Calculate FD approximation to the second-order derivative
l2 = fdm.fdm2_c6_A(n, np.pi **2.)
# l2 = fdm.fdm2_c6_A(n, 48./7.)
A2 = np.diagflat(l2[0,1:],-1) +np.diagflat(l2[1,:],0) +np.diagflat(l2[2,:-1],1)
r2 = fdm.fdm2_c6_B(n, np.pi **2.)
# r2 = fdm.fdm2_c6_B(n, 48./7.)
B2 = np.diagflat(r2[0][3:],-3) +np.diagflat(r2[1][2:],-2) +np.diagflat(r2[2][1:],-1) +np.diagflat(r2[3],0) \
+np.diagflat(r2[4][:-1],1) +np.diagflat(r2[5][:-2],2) +np.diagflat(r2[6][:-3],3)
fdm2s = [ scipy.linalg.solve(A2,B2@f[1]) /h**2. for f in fs ]
# e = [ A2@(fdm2s[i]-fs[i][3]) for i in range(len(fdm2s)) ]
e = [ fdm2s[i]-fs[i][3] for i in range(len(fdm2s)) ]
# Plot result
plt.figure( figsize = (4,3))
for i in range(len(fdm2s)):
# plt.plot(x, f[3], label=f[0])
plt.plot(x, e[i], label=fs[i][0])
plt.title("Second-order derivative")
plt.xlabel("$x$")
plt.ylabel("$d^2f/dx^2$")
plt.legend(loc="best")
plt.show()
# # Convergence study: we increment the number of grid points n by factors of 2
# # between 2**imin and 2**imax
# h = []
# e1s = []
# e2s = []
# for n in [ 2**i for i in range(4,11)]:
# x = np.linspace(-1.,1.,n)
# h.append( (x[n-1]-x[0]) /(n-1) )
# # Define list of functions to be processed
# fs = [ exp_(x) ]#, sin_(x), cos_(x), gauss_(x)]
# # Calculate FD approximation to the first-order derivative and error
# l1 = fdm.fdm1_c6_A(n)
# A1 = np.diagflat(l1[0,1:],-1) +np.diagflat(l1[1,:],0) +np.diagflat(l1[2,:-1],1)
# r1 = fdm.fdm1_c6_B(n)
# B1 = np.diagflat(r1[0][2:],-2) +np.diagflat(r1[1][1:],-1) +np.diagflat(r1[2],0) \
# +np.diagflat(r1[3][:-1],1) +np.diagflat(r1[4][:-2],2)
# fdm1s = [ scipy.linalg.solve(A1,B1@f[1]) /h[-1] for f in fs ]
# # e1s.append( [ scipy.linalg.norm(fdm1s[i]-fs[i][2]) /np.sqrt(float(n)) for i in range(len(fdm1s)) ] )
# e1s.append( [ np.amax(np.abs(fdm1s[i]-fs[i][2])) for i in range(len(fdm1s)) ] )
# # Calculate FD approximation to the second-order derivative
# l2 = fdm.fdm2_c6_A(n, np.pi **2.)
# # l2 = fdm.fdm2_c6_A(n, 48./7.)
# A2 = np.diagflat(l2[0,1:],-1) +np.diagflat(l2[1,:],0) +np.diagflat(l2[2,:-1],1)
# r2 = fdm.fdm2_c6_B(n, np.pi **2.)
# # r2 = fdm.fdm2_c6_B(n, 48./7.)
# B2 = np.diagflat(r2[0][3:],-3) +np.diagflat(r2[1][2:],-2) +np.diagflat(r2[2][1:],-1) +np.diagflat(r2[3],0) \
# +np.diagflat(r2[4][:-1],1) +np.diagflat(r2[5][:-2],2) +np.diagflat(r2[6][:-3],3)
# fdm2s = [ scipy.linalg.solve(A2,B2@f[1]) /h[-1]**2. for f in fs ]
# # e2s.append( [ scipy.linalg.norm(fdm2s[i]-fs[i][3]) /np.sqrt(float(n)) for i in range(len(fdm2s)) ] )
# e2s.append( [ np.amax(np.abs(fdm2s[i]-fs[i][3])) for i in range(len(fdm2s)) ] )
# legends = [ f[0] for f in fs ]
# h_ = np.array( h ) # We need arrays to plot
# # e1s_ = np.array( e1s ) # We need arrays to plot
# # plt.figure( figsize = (4,3))
# # for i in range(np.shape(e1s_)[1]):
# # plt.plot( h_/h_[0], e1s_[:,i] )#/e1s_[0,i] )
# # plt.legend(legends,loc='best')
# # plt.xscale("log")
# # plt.yscale("log")
# # plt.title("First-order derivative")
# # plt.xlabel("Grid spacing $h/h_0$")
# # plt.ylabel("Global error $e_2/e_{2,0}$")
# # #plt.ylabel("Global error $e_\infty/e_{\infty,0}$")
# # plt.show()
# e2s_ = np.array( e2s ) # We need arrays to plot
# plt.figure( figsize = (4,3))
# for i in range(np.shape(e2s_)[1]):
# plt.plot( h_/h_[0], e2s_[:,i] )#/e2s_[0,i] )
# plt.legend(legends,loc='best')
# plt.xscale("log")
# plt.yscale("log")
# # plt.axis([None,None,1e-10,1e0])
# plt.title(r"Second-order derivative")
# plt.xlabel(r"Grid spacing $h/h_0$")
# plt.ylabel(r"Global error $e_2/e_{2,0}$")
# #plt.ylabel(r"Global error $e_\infty/e_{\infty,0}$")
# plt.show()
| gpl-3.0 |
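The modified-wavenumber curves above depend on the project-specific `fdm` module. As a self-contained cross-check of the explicit second-order (C2) reference curves, the sketch below applies the C2 stencils to a complex exponential on a uniform grid and recovers Im(λ1) = sin(ω) and −Re(λ2) = 2(1 − cos ω) numerically (grid size and sample frequencies are arbitrary choices):

```python
# Numerical cross-check of the C2 modified wavenumbers used as reference curves above.
import numpy as np

def modified_wavenumbers_c2(omega, n=64):
    """Apply 2nd-order central differences to exp(i*omega*j) and read off
    Im(lambda_1) and -Re(lambda_2) at an interior point (grid spacing h = 1)."""
    j = np.arange(n)
    f = np.exp(1j * omega * j)
    d1 = (np.roll(f, -1) - np.roll(f, 1)) / 2.0        # delta_x  C2
    d2 = np.roll(f, -1) - 2.0 * f + np.roll(f, 1)      # delta_xx C2
    k = n // 2                                         # interior point, away from the wrap
    return (d1[k] / f[k]).imag, -(d2[k] / f[k]).real

for omega in [0.5, 1.0, 2.0, 3.0]:
    w1, w2 = modified_wavenumbers_c2(omega)
    print(omega, w1, np.sin(omega), w2, 2.0 * (1.0 - np.cos(omega)))
```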
theakholic/ThinkStats2 | code/analytic.py | 69 | 6265 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import numpy as np
import pandas
import nsfg
import thinkplot
import thinkstats2
def ParetoMedian(xmin, alpha):
"""Computes the median of a Pareto distribution."""
return xmin * pow(2, 1/alpha)
def MakeExpoCdf():
"""Generates a plot of the exponential CDF."""
thinkplot.PrePlot(3)
for lam in [2.0, 1, 0.5]:
xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
label = r'$\lambda=%g$' % lam
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_expo_cdf',
title='Exponential CDF',
xlabel='x',
ylabel='CDF')
def ReadBabyBoom(filename='babyboom.dat'):
"""Reads the babyboom data.
filename: string
returns: DataFrame
"""
var_info = [
('time', 1, 8, int),
('sex', 9, 16, int),
('weight_g', 17, 24, int),
('minutes', 25, 32, int),
]
columns = ['name', 'start', 'end', 'type']
variables = pandas.DataFrame(var_info, columns=columns)
variables.end += 1
dct = thinkstats2.FixedWidthVariables(variables, index_base=1)
df = dct.ReadFixedWidth(filename, skiprows=59)
return df
def MakeBabyBoom():
"""Plot CDF of interarrival time on log and linear scales.
"""
# compute the interarrival times
df = ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label='actual')
thinkplot.PrePlot(cols=2)
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='minutes',
ylabel='CDF',
legend=False)
thinkplot.SubPlot(2)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel='minutes',
ylabel='CCDF',
yscale='log',
legend=False)
thinkplot.Save(root='analytic_interarrivals',
legend=False)
def MakeParetoCdf():
"""Generates a plot of the Pareto CDF."""
xmin = 0.5
thinkplot.PrePlot(3)
for alpha in [2.0, 1.0, 0.5]:
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
thinkplot.Plot(xs, ps, label=r'$\alpha=%g$' % alpha)
thinkplot.Save(root='analytic_pareto_cdf',
title='Pareto CDF',
xlabel='x',
ylabel='CDF')
def MakeParetoCdf2():
"""Generates a plot of the CDF of height in Pareto World."""
xmin = 100
alpha = 1.7
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 1000.0, n=100)
thinkplot.Plot(xs, ps)
thinkplot.Save(root='analytic_pareto_height',
title='Pareto CDF',
xlabel='height (cm)',
ylabel='CDF',
legend=False)
def MakeNormalCdf():
"""Generates a plot of the normal CDF."""
thinkplot.PrePlot(3)
mus = [1.0, 2.0, 3.0]
sigmas = [0.5, 0.4, 0.3]
for mu, sigma in zip(mus, sigmas):
xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma,
low=-1.0, high=4.0)
label = r'$\mu=%g$, $\sigma=%g$' % (mu, sigma)
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_normal_cdf',
title='Normal CDF',
xlabel='x',
ylabel='CDF',
loc=2)
def MakeNormalModel(weights):
"""Plot the CDF of birthweights with a normal model."""
# estimate parameters: trimming outliers yields a better fit
mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
print('Mean, Var', mu, var)
# plot the model
sigma = math.sqrt(var)
print('Sigma', sigma)
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
# plot the data
cdf = thinkstats2.Cdf(weights, label='data')
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Save(root='analytic_birthwgt_model',
title='Birth weights',
xlabel='birth weight (lbs)',
ylabel='CDF')
def MakeExampleNormalPlot():
"""Generates a sample normal probability plot.
"""
n = 1000
thinkplot.PrePlot(3)
mus = [0, 1, 5]
sigmas = [1, 1, 2]
for mu, sigma in zip(mus, sigmas):
sample = np.random.normal(mu, sigma, n)
xs, ys = thinkstats2.NormalProbability(sample)
label = '$\mu=%d$, $\sigma=%d$' % (mu, sigma)
thinkplot.Plot(xs, ys, label=label)
thinkplot.Save(root='analytic_normal_prob_example',
title='Normal probability plot',
xlabel='standard normal sample',
ylabel='sample values')
def MakeNormalPlot(weights, term_weights):
"""Generates a normal probability plot of birth weights."""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
thinkplot.PrePlot(2)
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='all live')
xs, ys = thinkstats2.NormalProbability(term_weights)
thinkplot.Plot(xs, ys, label='full term')
thinkplot.Save(root='analytic_birthwgt_normal',
title='Normal probability plot',
xlabel='Standard deviations from mean',
ylabel='Birth weight (lbs)')
def main():
thinkstats2.RandomSeed(18)
MakeExampleNormalPlot()
# make the analytic CDFs
MakeExpoCdf()
MakeBabyBoom()
MakeParetoCdf()
MakeParetoCdf2()
MakeNormalCdf()
# test the distribution of birth weights for normality
preg = nsfg.ReadFemPreg()
full_term = preg[preg.prglngth >= 37]
weights = preg.totalwgt_lb.dropna()
term_weights = full_term.totalwgt_lb.dropna()
MakeNormalModel(weights)
MakeNormalPlot(weights, term_weights)
if __name__ == "__main__":
main()
| gpl-3.0 |
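As a quick numerical sanity check of the ParetoMedian formula above, independent of the thinkstats2/thinkplot helpers (sample size and parameters are arbitrary):

```python
# Check ParetoMedian against the median of a large Pareto sample.
import numpy as np

def pareto_median(xmin, alpha):
    return xmin * pow(2, 1. / alpha)      # same formula as ParetoMedian above

xmin, alpha = 0.5, 2.0
rng = np.random.RandomState(17)
# numpy's pareto() draws the Lomax form; shift and scale to the classic Pareto(xmin, alpha)
sample = xmin * (rng.pareto(alpha, size=1000000) + 1.)
print(pareto_median(xmin, alpha))         # 0.5 * 2**0.5 ~= 0.707
print(np.median(sample))                  # should agree to ~3 decimals
```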
danweflen/mrgaze | build/lib/mrgaze/mrclean.py | 4 | 7570 | #!/usr/bin/env python
"""
Video pupilometry functions
- takes calibration and gaze video filenames as input
- controls calibration and gaze estimation workflow
Example
-------
>>> mrgaze.py <Calibration Video> <Gaze Video>
Author
------
Mike Tyszka, Caltech Brain Imaging Center
License
-------
This file is part of mrgaze.
mrgaze is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
mrgaze is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with mrgaze. If not, see <http://www.gnu.org/licenses/>.
Copyright
---------
2014 California Institute of Technology.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import medfilt
from scipy.ndimage.morphology import binary_dilation
from mrgaze import utils
from mrgaze import improc as ip
def MRClean(frame, z_thresh=8.0):
"""
Attempt to repair scan lines corrupted by MRI RF or gradient pulses.
Parameters
----------
frame : numpy integer array
Original corrupted, interlaced video frame
cfg : configuration object
Pipeline configuration parameters
Returns
-------
frame_clean : numpy integer array
Repaired interlaced frame
art_power : float
Artifact power in frame
Example
-------
>>>
"""
# Internal debug flag
DEBUG = False
# Init repaired frame
frame_clean = frame.copy()
# Split frame into even and odd lines
fr_even = frame[0::2,:]
fr_odd = frame[1::2,:]
# Odd - even frame difference
df = fr_odd.astype(float) - fr_even.astype(float)
# Row mean of frame difference
df_row_mean = np.mean(df, axis=1)
# Artifact power - mean square of row means
art_power = np.mean(df_row_mean**2)
# Robust estimate of noise SD in row projection
sd_n = ip.WaveletNoiseSD(df_row_mean)
# Frame difference projection z-scores
z = df_row_mean / sd_n
# Find scanlines with |z| > z_thresh
bad_rows = np.abs(z) > z_thresh
# Median smooth the bad rows mask then dilate by 3 lines (kernel 2*3+1 = 7)
bad_rows = medfilt(bad_rows)
bad_rows = binary_dilation(bad_rows, structure=np.ones((7,)))
# If an artifact is present
if np.sum(bad_rows) > 0:
# Add leading and trailing zero to bad rows flag array
# This lets forward difference work correctly below
bad_rows_pad = np.append(0, np.append(bad_rows, 0))
# Find bad row block start and end indices by forward differencing
# Add leading and trailing zeros to avoid unterminated blocks
# Remember this later when determining correct row indices
dbad = np.diff(bad_rows_pad)
# Bad row block start and end indices
# bad_on indicates row indices immediately prior to block starts
# bad_off indicates row indices immediate after block ends
bad_on = (np.where(dbad > 0))[0] - 1
bad_off = (np.where(dbad < 0))[0]
if bad_on.size != bad_off.size:
print('Block start and end arrays differ in size - returning')
return frame_clean, art_power
# Init cleaned half frames
fr_odd_clean = fr_odd.copy()
fr_even_clean = fr_even.copy()
# Loop over last good rows before bad blocks
for i, r0 in enumerate(bad_on):
# First good row after bad block
r1 = bad_off[i]
# Protect against overrange rows
# This reduces artifact cleanup effectiveness if bad rows
# Are adjacent to the top or bottom of the frame
nr = fr_odd.shape[0]
r0 = utils._clamp(r0, 0, nr-1)
r1 = utils._clamp(r1, 0, nr-1)
# Linear interp between leading and trailing good rows
odd_interp = InterpRows(fr_odd, r0, r1)
even_interp = InterpRows(fr_even, r0, r1)
# Extract equivalent rows from original odd and even frames
odd_orig = fr_odd[r0:r1+1,:]
even_orig = fr_even[r0:r1+1,:]
# Calculate RMS difference between interp and orig
odd_rms_diff = utils._rms(odd_orig - odd_interp)
even_rms_diff = utils._rms(even_orig - even_interp)
# If RMS diff for odd < even, consider odd rows, clean
# and vise versa
if odd_rms_diff < even_rms_diff:
fr_even_clean[r0:r1+1,:] = odd_orig
else:
fr_odd_clean[r0:r1+1,:] = even_orig
# Reinterlace cleaned frame
frame_clean[0::2,:] = fr_odd_clean
frame_clean[1::2,:] = fr_even_clean
# Display results
if DEBUG:
ny = bad_rows.shape[0]
y = np.arange(0,ny)
plt.figure(1)
plt.set_cmap('jet')
plt.subplot(321)
plt.imshow(fr_odd)
plt.title('Odd')
plt.subplot(322)
plt.imshow(fr_even)
plt.title('Even')
plt.subplot(323)
plt.imshow(fr_odd_clean)
plt.title('Odd Repaired')
plt.subplot(324)
plt.imshow(fr_even_clean)
plt.title('Even Repaired')
plt.subplot(325)
plt.imshow(df)
plt.title('Odd - Even')
plt.subplot(326)
plt.plot(y, z, y, bad_rows * z.max() * 0.9)
plt.title('Z-score and Bad Row Mask')
plt.show()
return frame_clean, art_power
def InpaintRows(src, r0, r1):
"""
Repair bad row blocks by vertical linear interpolation
Parameters
----
src : 2D numpy uint8 array
Original image to be repaired
r0 : integer
Row index immediately before start of corrupted rows
r1 : integer
Row index immediately after end of corrupted rows
Returns
----
dest : 2D numpy uint8 array
Repaired image
Example:
----
>>> img_repaired = InpaintRows(img, 5, 25)
"""
# Init repaired image
dest = src.copy()
# Linear interpolation over bad row block
Ii = InterpRows(src, r0, r1)
# Replace bad row block with interpolated values
dest[r0:r1+1,:] = np.round(Ii).astype(int)
return dest
def InterpRows(src, r0, r1):
'''
Create a linear interpolation block between two rows (inclusive)
Arguments
----
src : 2D numpy float array
Source image for start and end rows.
r0 : integer
Starting row index within src.
r1 : integer
Ending row index within src.
Returns
----
row_block : 2D numpy float array
Interpolation between rows r0 and r1 inclusive.
'''
# Extract image rows r0 and r1
I0 = (src[r0, :].astype(float)).reshape(1,-1)
I1 = (src[r1, :].astype(float)).reshape(1,-1)
# Create vector of row indices for interpolation
# NOTE : np.arange(0,n) generates [0,1,2,...,n-1]
# so we need to +1 the number of elements for inclusion
# of the trailing row.
f = (np.arange(0, r1 - r0 + 1).reshape(-1,1)) / float(r1-r0)
# Linear interpolation over bad row block
row_block = f.dot(I1-I0) + I0
return row_block
| gpl-3.0 |
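A minimal smoke-test sketch for MRClean on a synthetic interlaced frame (frame size, intensity levels, and the corrupted-row block are assumptions for illustration; it requires the mrgaze package, including its utils and improc helpers, to be importable):

```python
# Hypothetical smoke test: corrupt a block of odd scanlines and let MRClean repair them.
import numpy as np
from mrgaze.mrclean import MRClean    # assumes the mrgaze package is installed

rng = np.random.RandomState(0)
frame = (50. + 5. * rng.randn(240, 320)).clip(0, 255).astype(np.uint8)   # assumed 240x320 frame

frame_bad = frame.copy()
frame_bad[101:121:2, :] = 255         # mimic an RF/gradient artifact on a block of scanlines

clean, art_power = MRClean(frame_bad, z_thresh=8.0)
print('artifact power:', art_power)
print('max residual vs. original:', np.abs(clean.astype(int) - frame.astype(int)).max())
```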
baspijhor/paparazzi | sw/tools/tcp_aircraft_server/phoenix/__init__.py | 86 | 4470 | #Copyright 2014, Antoine Drouin
"""
Phoenix is a Python library for interacting with Paparazzi
"""
import math
"""
Unit convertions
"""
def rad_of_deg(d): return d/180.*math.pi
def deg_of_rad(r): return r*180./math.pi
def rps_of_rpm(r): return r*2.*math.pi/60.
def rpm_of_rps(r): return r/2./math.pi*60.
def m_of_inch(i): return i*0.0254
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color' : 'k', 'fontsize' : 20 }
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
if fig == None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
if legend <> None:
ax.legend(legend, loc='best')
if xlim <> None:
ax.set_xlim(xlim[0], xlim[1])
if ylim <> None:
ax.set_ylim(ylim[0], ylim[1])
"""
Messages
"""
#: dictionary mapping the C type to its length in bytes (e.g char -> 1)
TYPE_TO_LENGTH_MAP = {
"char" : 1,
"uint8" : 1,
"int8" : 1,
"uint16" : 2,
"int16" : 2,
"uint32" : 4,
"int32" : 4,
"float" : 4,
"double" : 8,
}
#: dictionary mapping the C type to correct format string
TYPE_TO_PRINT_MAP = {
float : "%f",
str : "%s",
chr : "%c",
int : "%d"
}
ACID_ALL = 0xFF
ACID_TEST = 0xFE
ACID_GROUNDSTATION = 0xFD
#: dictionary mapping debug types to format characters
DEBUG_MESSAGES = {
"DEBUG_UINT8" : "%d",
"DEBUG_INT32" : "%d",
"DEBUG_FLOAT" : "%#f"
}
"""
Binary logs
See format description in sw/airborne/subsystems/datalink/fms_link.c
"""
import struct
def hex_of_bin(b): return ' '.join( [ "%02X" % ord( x ) for x in b ] )
import pdb
def read_binary_log(filename, tick_freq = 2*512.):
f = open(filename, "rb")
d = f.read()
packet_header_len = 6
msg_header_len = 2
def read_packet(d, packet_start):
payload_start = packet_start+packet_header_len
timestamp, payload_len = struct.unpack("IH", d[packet_start:payload_start])
msgs = read_packet_payload(d, payload_start, payload_len)
next_packet = payload_start+payload_len+2
return timestamp, msgs, next_packet
def read_packet_payload(d, s, l):
msgs = []
packet_end = s+l; msg_start = s
while msg_start<packet_end:
payload_start = msg_start+msg_header_len
msg_len, msg_id = struct.unpack("BB", d[msg_start:payload_start])
payload_end = payload_start+msg_len
msg_payload = d[payload_start:payload_end]
msgs.append([msg_id, msg_payload])
#print msg_id, msg_len, hex_of_bin(msg_payload)
msg_start = payload_end
return msgs
packets = []
packet_start=0
while packet_start<len(d):
timestamp, msgs, next_packet = read_packet(d, packet_start)
packets.append([timestamp/tick_freq, msgs])
#print timestamp, msgs
packet_start = next_packet
f.close()
return packets
def extract_from_binary_log(protocol, packets, msg_names, t_min=None, t_max=None):
ret = [{'time':[], 'data':[]} for m in msg_names]
if t_min == None: t_min = packets[0][0]
if t_max == None: t_max = packets[-1][0]
for t, msgs in packets:
if t>= t_min and t<= t_max:
for id, payload in msgs:
m = protocol.get_message_by_id('telemetry', id)
try: i = msg_names.index(m.name)
except: pass
finally: ret[i]['time'].append(t); ret[i]['data'].append(m.unpack_scaled_values(payload))
return ret
| gpl-2.0 |
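To make the binary layout that read_binary_log assumes explicit, here is a small self-contained sketch that builds one synthetic packet with Python's struct module and walks it with the same header sizes (6-byte packet header = uint32 tick timestamp + uint16 payload length; 2-byte message header = uint8 length + uint8 id; the 2 trailing bytes skipped per packet are assumed to be a checksum):

```python
# Synthetic round-trip of the packet layout parsed by read_binary_log above.
import struct

# One message: id=42, 4-byte float payload
msg_payload = struct.pack("f", 3.14)
msg = struct.pack("BB", len(msg_payload), 42) + msg_payload

# One packet: timestamp in ticks, payload length, messages, 2-byte trailer (assumed checksum)
timestamp_ticks = 2048
packet = struct.pack("IH", timestamp_ticks, len(msg)) + msg + b"\x00\x00"

# Parse it back the same way read_binary_log does
ts, payload_len = struct.unpack("IH", packet[:6])
msg_len, msg_id = struct.unpack("BB", packet[6:8])
value, = struct.unpack("f", packet[8:8 + msg_len])
print(ts / (2 * 512.), msg_id, value)     # time in seconds at tick_freq = 2*512
```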
cauchycui/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualized the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it appears to lie in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the images,
# turning the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
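# Note: broadcasting gives `grid` the shape (len(multiples), len(multiples), 64),
# and `flat_grid` is reshaped to (len(multiples)**2, 64) so it can be passed to
# clf.predict like ordinary samples.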
# title for the plots
titles = ['SVC with rbf kernel',
          'SVC (linear kernel)\n with Nystroem rbf feature map\n'
          'n_components=100',
          'SVC (linear kernel)\n with Fourier rbf feature map\n'
          'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
hkucukdereli/MousyBox | loadData.py | 1 | 7920 | import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from Tkinter import Tk
from tkFileDialog import askopenfilename,askdirectory
def chooseFile():
"""
Parameters
----------
None
No parameters are specified.
Returns
-------
filenames: tuple
A tuple that contains the list of files to be loaded.
"""
## change the wd to dir containing the script
curpath = os.path.dirname(os.path.realpath(__file__))
os.chdir(curpath)
root = Tk()
root.withdraw()
filenames = askopenfilename(parent= root, filetypes = (("CSV files", "*.csv"), ("Text files", "*.txt"), ("All files", "*.*")), multiple= True)
if len(filenames) == 1:
print len(filenames), " file is loaded."
elif len(filenames) > 1:
print len(filenames), " files are loaded."
else:
print "No files are loaded."
return filenames
def loadData(fileName):
"""
Parameters
----------
fileName: string
Input file name as a legal string.
Returns
-------
data: pandas data frame
Data frame that contains the raw data.
trialNum: pandas data frame
Data frame that contains the valid trial numbers.
Trials_list: dict
Dict that contains sorted trial data.
"""
# load the data from csv
data = pd.read_csv(fileName, delimiter= ",", names= ['Event', 'Value_1', 'Value_2'], skip_blank_lines= True, error_bad_lines= False)
# groupd the data
grouped = data.groupby('Event')
trialNum = grouped.get_group('Trial#')
trialNew = grouped.get_group('Trial_New')
trialEnd = grouped.get_group('Trial_End').iloc[1:]
trialEnd = trialEnd.append(data.tail(1))
Trials_list = {}
for ind, each in enumerate(trialNum['Value_1']):
ind_head = trialNum.iloc[ind].name
ind_tail = trialEnd.iloc[ind].name
#Trials_list.append(data.iloc[ind_head : ind_tail].sort())
Trials_list[int(each)] = data.iloc[ind_head : ind_tail].sort_values(by= 'Value_2')
return data, trialNum, Trials_list
def getLicks(trialNum, Trials_list, th= 0):
"""
Parameters
----------
trialNum: pandas data frame
Data frame that contains the valid trial numbers.
Trials_list: dict
Dict that contains sorted trial data.
th: int
Threshold value for digitizing tha lick values. Default is 0.
Returns
-------
Licks: dict
Dict keyed by trial number; each entry is a data frame with the timestamps,
raw and digitized lick values.
Columns: LickTime, LickDigi, LickRaw
"""
Licks = {}
for ind, each in enumerate(trialNum['Value_1']):
lick_times = np.array(Trials_list[int(each)][Trials_list[int(each)]['Event'] == 'Lick']['Value_2'])
lick_values = np.array(Trials_list[int(each)][Trials_list[int(each)]['Event'] == 'Lick']['Value_1'])
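# np.digitize with the single bin edge `th` maps raw samples below the
# threshold to 0 and samples at or above it to 1; this becomes the LickDigi column.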
Licks[int(each)] = pd.DataFrame({'LickTime' : lick_times, 'LickRaw' : lick_values, 'LickDigi' : np.digitize(lick_values, bins= [th])})
return Licks
def findLicks(trialNum, Licks):
"""
Parameters
----------
trialNum: pandas data frame
Data frame that contains the valid trial numbers.
Licks: dict
Dict of per-trial data frames returned by getLicks.
Returns
-------
Licks: dict
Same dict with an added 'Stamps' column per trial: +1 marks a lick onset,
-1 a lick offset, 0 otherwise.
Columns: LickTime, LickDigi, LickRaw, Stamps
"""
for ind, each in enumerate(trialNum['Value_1']):
temp = np.array(Licks[int(each)]['LickDigi'].iloc[1:-1]) - np.array(Licks[int(each)]['LickDigi'].iloc[0:-2])
temp = np.append(np.append(0, temp), 0)
Licks[int(each)]['Stamps'] = temp
#Licks[int(each)]['Stamps'] = Licks[int(each)]['LickTime'][Licks[int(each)]['LickDigi'] == 1]
return Licks
def countLicks(trialNum, Licks, bins):
"""
Parameters
----------
trialNum: pandas data frame
Data frame that contains the valid trial numbers.
Licks: dict
Dict of per-trial data frames with a 'Stamps' column (output of findLicks).
bins: list
Bin edges in seconds; they are converted to milliseconds inside the function.
Returns
-------
lickCounts: pandas data frame
Data frame with the number of lick onsets counted per trial and bin.
"""
bins = np.array(bins)*1000  # scale bin edges from seconds to milliseconds (a plain list*1000 would repeat the list)
lickCounts = pd.DataFrame()
for ind, each in enumerate(trialNum['Value_1']):
for bin in bins:
licks = Licks[int(each)].iloc[bin:bin+1]
tempCount = np.array(licks['LickTime'][licks['Stamps'] == 1])
#tempCount = np.array(Licks[int(each)]['LickTime'][Licks[int(each)]['Stamps'] == 1])
print len(tempCount)
lickCounts[int(each)] = len(tempCount)
return lickCounts
def getPokes(trialNum, Trials_list):
"""
Parameters
----------
trialNum: pandas data frame
Data frame that contains the valid trial numbers.
Trials_list: dict
Dict that contains sorted trial data.
Returns
-------
Pokes: dict
Dict keyed by trial number; each entry is a data frame with the start and
end timestamps of each poke.
Columns: PokeStart, PokeEnd
"""
Pokes = {}
for ind, each in enumerate(trialNum['Value_1']):
poke_start = pd.DataFrame({'PokeStart' : np.array(Trials_list[int(each)][Trials_list[int(each)]['Event'] == 'Poke_Start']['Value_1'])})
poke_end = pd.DataFrame({'PokeEnd' : np.array(Trials_list[int(each)][Trials_list[int(each)]['Event'] == 'Poke_End']['Value_1'])})
Pokes[int(each)] = pd.concat([poke_start, poke_end], axis= 1)
return Pokes
def plotLicks(trialNum, Licks):
"""
"""
Licks
for ind, each in enumerate(trialNum['Value_1']):
lick_start = Licks[10]['LickTime'][Licks[10]['Stamps'] == 1]
lick_end = Licks[10]['LickTime'][Licks[10]['Stamps'] == -1]
#fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 3), facecolor='w', dpi= 150)
for lick_ind in Licks[int(each)].index:
pass
#if Licks[int(each)]['']
if __name__ == "__main__":
#filenames = chooseFile()
fname = "C:\Users\hakan\Documents\git_repos\MousyBox\\7792\\7792_Day09.csv"
[data, trialNum, Trials_list] = loadData(fname)
Licks = getLicks(trialNum, Trials_list, th= 50)
Licks = findLicks(trialNum, Licks)
lickCounts = countLicks(trialNum, Licks, [0, 5, 10, 15])
print lickCounts
print(Licks[10].head(5))
for lick in Licks[10].index:
if Licks[10]['Stamps'].iloc[lick] == 1:
print Licks[10]['LickTime'].iloc[lick], Licks[10]['Stamps'].iloc[lick]
elif Licks[10]['Stamps'].iloc[lick] == -1:
print Licks[10]['LickTime'].iloc[lick], Licks[10]['Stamps'].iloc[lick]
#plotLicks(trialNum, Licks)
#print Licks[10].iloc[3970:4000]
##print Licks[10]['LickTime'][Licks[10]['Stamps'] == 1]
##print Licks[10]['LickTime'][Licks[10]['Stamps'] == -1]
#Licks[int(each)]['Stamps'] = Licks[int(each)]['LickTime'][Licks[int(each)]['LickDigi'] == 1]
#plt.scatter(Licks[10]['LickTime'][Licks[10]['Stamps'] == 1], Licks[10]['LickTime'][Licks[10]['Stamps'] == -1])
#plt.show()
#a=np.array(Licks[10]['LickDigi'].iloc[0:-2])
#b=np.array(Licks[10]['LickDigi'].iloc[1:-1])
#print a+b
#fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 3), facecolor='w', dpi= 150)
#ax.plot(Licks[10].LickDigi*400-20)
#ax.plot(LickStamps[10])
#print LickStamps[10]['Gaussed'].iloc[0]
#for row in LickEnd[10].index:
#ax.scatter(row, LickStamps[10]['Gaussed'].loc[row])
#ax.set_y#lim([-30, 100])
#plt.show()
| mit |
lnls-fac/collective_effects | pycolleff/pycolleff/process_wakes.py | 1 | 66191 | #!/usr/bin/env python3
import os as _os
import re as _re
import sh as _sh
import gzip as _gzip
import pickle as _pickle
import numpy as _np
from scipy import integrate as _scy_int
import matplotlib.pyplot as _plt
from matplotlib import rc as _rc
_rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#_rc('font',**{'family':'serif','serif':['Palatino']})
_rc('text', usetex=True)
from mathphys.constants import light_speed as c
from . import colleff as _colleff
from . import sirius as _sirius
try:
from pyaccel import naff as _naff
bool_pyaccel = True
except Exception:
bool_pyaccel = False
## Throughout the code I am assuming:
# s positive means particle behind source --> Wl, Wt = 0 for s < 0
# Wl(s) = -c/Q * int El(ct-s,t) dt
# Wx(s) = - int_-inf^s dWl/dx ds'
# Zl = int exp(i*w*s) Wl(s) ds
# Zx = i*int exp(i*w*s) Wx(s) ds
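# As a concrete illustration of these conventions (informative only, nothing
# below depends on this comment): the loss factor computed in EMSimulData.klossW
# is kloss = int W_l(s)*lambda(s) ds with a Gaussian line density
# lambda(s) = exp(-s**2/(2*sigma**2))/(sqrt(2*pi)*sigma), i.e. the wake
# potential weighted by the bunch profile used in the simulation.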
_jnPth = _os.path.sep.join
_si = _sirius.create_ring()
DEFAULT_FNAME_SAVE = 'SimulData.pickle'
FNAME_ECHOZ1 = r"wake.dat"
FNAME_ECHOZ2 = r"wake[LT]{1}.dat"
FNAME_ECHOZR2D = r"(wakeL_([0-9]{2}).txt)" # the older .dat files are not treated
FNAME_GDFIDL = r"[\w-]+W[YXq]{1}_AT_XY.[0-9]{4}"
ANALYSIS_TYPES = {'dx', # horizontal impedance
'dy', # vertical impedance
'db', # both planes are symmetric
'll' # longitudinal and transverse quadrupolar impedances
}
PLANES = ('ll','dx','dy','qx','qy')
TITLES = {'ll':'Longitudinal',
'dx':'Dipolar Horizontal',
'dy':'Dipolar Vertical',
'qx':'Quadrupolar Horizontal',
'qy':'Quadrupolar Vertical',
}
WAKE_YLABELS= {'ll':r'$W_l$ [V/pC]',
'dx':r'$W_{{D_x}}$ [V/pC/m]',
'dy':r'$W_{{D_y}}$ [V/pC/m]',
'qx':r'$W_{{Q_x}}$ [V/pC/m]',
'qy':r'$W_{{Q_y}}$ [V/pC/m]'
}
IMPS_YLABELS= {'ll':r'$Z_l$ [$\Omega$]',
'dx':r'$Z_{{D_x}}$ [$\Omega$/m]',
'dy':r'$Z_{{D_y}}$ [$\Omega$/m]',
'qx':r'$Z_{{Q_x}}$ [$\Omega$/m]',
'qy':r'$Z_{{Q_y}}$ [$\Omega$/m]'
}
class EMSimulData:
def __init__(self, code=None):
self.code = code # CST, ACE3P, GdfidL, ECHOz1 ECHOz2, ...
self.bunlen = 0.0 # Bunch Length Used in simulation[m]
self.sbun = _np.array([],dtype=float) # positions where the bunch is defined [m]
self.bun = _np.array([],dtype=float) # bunch profile used in the simulation [As/m]
self.s = _np.array([],dtype=float) # axis: distance from following to drive bunch [m]
self.Wll = _np.array([],dtype=float) # Longitudinal Wakepotential [V/C]
self.Wdx = _np.array([],dtype=float) # Dipolar Horizontal Wakepotential [V/C/m]
self.Wdy = _np.array([],dtype=float) # Dipolar Vertical Wakepotential [V/C/m]
self.Wqx = _np.array([],dtype=float) # Quadrupolar Horizontal Wakepotential [V/C/m]
self.Wqy = _np.array([],dtype=float) # Quadrupolar Vertical Wakepotential [V/C/m]
self.freq = _np.array([],dtype=float) # axis: frequency obtained from FFT [GHz]
self.Zll = _np.array([],dtype=complex) # Longitudinal Impedance [Ohm]
self.Zdx = _np.array([],dtype=complex) # Dipolar Horizontal Impedance [Ohm]
self.Zdy = _np.array([],dtype=complex) # Dipolar Vertical Impedance [Ohm]
self.Zqx = _np.array([],dtype=complex) # Quadrupolar Horizontal Impedance [Ohm]
self.Zqy = _np.array([],dtype=complex) # Quadrupolar Vertical Impedance [Ohm]
self._klossW = None
self._kckdxW = None
self._kckdyW = None
self._kckqxW = None
self._kckqyW = None
def copy(self):
other = EMSimulData()
other.code = self.code
other.bunlen = self.bunlen
other.sbun = self.sbun.copy()
other.bun = self.bun.copy()
other.s = self.s.copy()
other.Wll = self.Wll.copy()
other.Wdx = self.Wdx.copy()
other.Wdy = self.Wdy.copy()
other.Wqx = self.Wqx.copy()
other.Wqy = self.Wqy.copy()
other.freq = self.freq.copy()
other.Zll = self.Zll.copy()
other.Zdx = self.Zdx.copy()
other.Zdy = self.Zdy.copy()
other.Zqx = self.Zqx.copy()
other.Zqy = self.Zqy.copy()
other._klossW = self._klossW
other._kckdxW = self._kckdxW
other._kckdyW = self._kckdyW
other._kckqxW = self._kckqxW
other._kckqyW = self._kckqyW
return other
def klossW(self):
if self._klossW: return self._klossW
T0, sigs, spos = _si.T0, self.bunlen, self.s
wake = self.Wll
if wake is None or _np.all(wake==0): return None
rhos = (1/(sigs*_np.sqrt(2*_np.pi)))*_np.exp(-spos**2/(2*sigs**2))
kW = _np.trapz(wake*rhos, x=spos)
self._klossW = kW
return kW
def PlossW(self, T0=_si.T0, h=_si.harm_num, Iavg=500e-3):
kW = self.klossW()
Ploss = kW * Iavg**2 * T0 * 1e12 / h
return Ploss
def kick_factorW(self,pl='dy'):
kick = getattr(self,'_kck'+pl+'W')
if kick: return kick
T0, sigs, spos = _si.T0, self.bunlen, self.s
wake = getattr(self,'W'+pl)
if wake is None or _np.all(wake==0): return None
rhos = (1/(sigs*_np.sqrt(2*_np.pi)))*_np.exp(-spos**2/(2*sigs**2))
kW = _np.trapz(wake*rhos, x=spos)
setattr(self,'_kck'+pl+'W', kW)
return kW
def klossZ(self,bunlen=2.65e-3,n=1):
_si.nbun = n
klossZ,*_ = _si.loss_factor(w = self.freq*2*_np.pi, Zl = self.Zll, bunlen=bunlen)
return klossZ
def kick_factorZ(self,pl='dy',bunlen=2.65e-3,n=1):
_si.nbun = n
Z = getattr(self,'Z'+pl)
if Z is None or _np.all(Z==0): return None
kckZ,*_ = _si.kick_factor(w = self.freq*2*_np.pi, Z = Z, bunlen=bunlen)
return kckZ
def PlossZ(self,bunlen=2.65e-3,n=1):
_si.nbun = n
_,PlossZ,*_ = _si.loss_factor(w = self.freq*2*_np.pi, Zl = self.Zll, bunlen=bunlen)
return PlossZ
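# Illustrative sketch (not executed here; `sim` is assumed to be an EMSimulData
# instance already filled by load_raw_data/calc_impedance defined below):
# k = sim.klossW()                 # loss factor from the wake (V/C, matching the stored wake)
# ky = sim.kick_factorW(pl='dy')   # vertical dipolar kick factor (V/C/m)
# kz = sim.klossZ(bunlen=3e-3, n=864)  # loss factor recomputed from the impedance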
def _ACE3P_load_data(simpar):
raise NotImplementedError('This function was not tested yet.')
nsigmas = 5
headerL = 3
if wdir.startswith(tardir): cpfile = False
else: cpfile = True
wakepath = _jnPth([wdir,'wakefield.out'])
loadres = _np.loadtxt(wakepath, skiprows=headerL)
if cpfile: _sh.cp(wakepath, tardir)
spos = loadres[:,0]
# I know this is correct for ECHO (2015/08/27):
if m==0: wake = -loadres[:,1]
else: wake = loadres[:,1]
spos = spos - nsigmas* bunlen # Performs displacement over s axis
return spos, wake
def _CST_load_data(simpar):
raise NotImplementedError('This function was not tested yet.')
headerL = 2
if wdir.startswith(tardir): cpfile = False
else: cpfile = True
wakepath = _jnPth([wdir,'wake.txt'])
loadres = _np.loadtxt(wakepath, skiprows=headerL)
if cpfile: _sh.cp(wakepath, tardir)
spos = loadres[:,0]
wake = loadres[:,1]
# Adjust s-axis (rescale or shift)
spos = spos/1000 # Rescaling mm to m
if m>0: wake = -wake
return spos, wake
def _GdfidL_load_dados_info(filename):
dados, info = [], []
with open(filename) as fh:
data = fh.read()
for line in data.splitlines():
if not line.startswith((' #',' %',' $')):
dados.append(line)
else:
info.append(line)
return dados, info
def _GdfidL_get_charge(info):
for line in info:
if line.find('total charge')>=0:
l = line.split(',')[1]
charge = float(_re.findall(r'[-+]?\d+\.?\d+[eE]?[-+]?\d+',l)[0])
break
return charge
def _GdfidL_get_integration_path(info):
for line in info:
if line.find('subtitle=')>=0:
x,y = (float(val) for val in _re.findall(r'[-+]?\d+\.?\d+[eE]?[-+]?\d+',line))
break
return x, y
def _GdfidL_get_longitudinal_info(path,filelist,pl='ll',silent=False):
if not silent: print('Loading longitudinal Wake file:')
fn = [f for f in filelist if f.find('Wq_AT_XY')>=0]
if not fn:
if not silent: print('No longitudinal wake file found. Exactly one is required.')
raise Exception('No longitudinal wake file found. Exactly one is required.')
if len(fn)>1:
if not silent: print('More than one longitudinal wake file found. Only one is allowed.')
raise Exception('More than one longitudinal wake file found. Only one is allowed.')
dados, info = _GdfidL_load_dados_info(_jnPth([path,fn[0]]))
charge = _GdfidL_get_charge(info)
xd, yd = _GdfidL_get_integration_path(info)
spos,wake = _np.loadtxt(dados,unpack=True) # dados is a list of strings
if not silent: print('Charge of the driving bunch: {0:5.3g} pC'.format(charge*1e12))
if pl == 'll' and (abs(xd) > 1e-10 or abs(yd) > 1e-10) and not silent:
print('Driving particle not in the origin. Are you sure this is what you want?')
elif pl !='ll' and abs(xd) < 1e-10 and abs(yd) < 1e-10 and not silent:
print('The driving bunch is too close to origin. Are you sure this is what you want?')
a = _np.argmin(_np.diff(spos)) + 1
sbun = spos[a:]
bun = wake[a:]*charge/_np.trapz(wake[a:],x=sbun) # C
wake = -wake[:a]/charge # V/C # minus sign because of convention
spos = spos[:a] # m
bunlen = -sbun[0]/6 # gdfidl uses a bunch with 6-sigma
if not silent:
print('Bunch length of the driving bunch: {0:7.4g} mm'.format(bunlen*1e3))
return spos, wake, sbun, bun, bunlen, xd, yd
def _GdfidL_get_transversal_info(path,filelist,pl='qx',silent=False):
stri = 'W{0:s}_AT_XY'.format(pl[1].upper())
fn = [f for f in filelist if f.find(stri)>=0]
if not fn:
if not silent: print('No W{0:s} wake file found. Skipping to next'.format(pl))
return None
if not silent: print('{0:2d} W{1:s} wake file found: {2:s}'.format(len(fn),pl,', '.join(fn)))
dados, info = _GdfidL_load_dados_info(_jnPth([path,fn[0]]))
charge = _GdfidL_get_charge(info)
if pl[1] == 'x':
delta1,_ = _GdfidL_get_integration_path(info)
else:
_,delta1 = _GdfidL_get_integration_path(info)
_, wake1 = _np.loadtxt(dados,unpack=True)
print('Integration path at {0:s} = {1:8.4g} um '.format(pl[1],delta1*1e6),end='')
wake = wake1/delta1 / charge # V/C/m
if len(fn) > 1:
dados, info = _GdfidL_load_dados_info(_jnPth([path,fn[1]]))
if pl[1] == 'x':
delta2,_ = _GdfidL_get_integration_path(info)
else:
_,delta2 = _GdfidL_get_integration_path(info)
_, wake2 = _np.loadtxt(dados,unpack=True)
print('and {0:8.4g} um'.format(delta2*1e6))
if pl[0] == 'd':
wake = (wake1/delta1 - wake2/delta2)/(1/delta1-1/delta2) / charge # V/C
else:
wake = (wake1 - wake2)/(delta1-delta2) / charge # V/C/m
else:
print()
return wake
def _GdfidL2_load_data(simul_data,path,anal_pl,silent=False,**symmetries):
raise NotImplementedError('Not implemented yet.')
# list all the files that match the name pattern for wakefields
f_in_dir = _sh.ls(path).stdout.decode()
f_match = _re.findall(FNAME_GDFIDL,f_in_dir)
anal_pl_ori = None
if anal_pl == 'db':
anal_pl_ori = 'db'
if not f_match:
anal_pl = 'dx' if _os.path.isdir('dxdpl') else 'dy'
else:
anal_pl = 'dx' if [f for f in f_match if f.find('WX_AT_XY')>=0] else 'dy'
if not silent: print('There is symmetry y=x, calculation performed in the '
+ anal_pl[1].upper() + ' plane.')
if anal_pl in {'ll'}:
if not f_match:
if not silent: print('No files found for longitudinal analysis.')
raise Exception('No files found for longitudinal analysis')
#Load longitudinal Wake
spos, wake, sbun, bun, bunlen, xd, yd = _GdfidL_get_longitudinal_info(path,f_match,pl='ll')
simul_data.Wll = wake
simul_data.s = spos
simul_data.bun = bun
simul_data.sbun = sbun
simul_data.bunlen = bunlen
# And quadrupolar Wakes, if existent:
if not silent: print('Loading Horizontal Quadrupolar Wake file:')
wake = _GdfidL_get_transversal_info(path,f_match,pl='qx') # V/C/m
if wake is not None: simul_data.Wqx = wake
if not silent: print('Loading Vertical Quadrupolar Wake file:')
wake = _GdfidL_get_transversal_info(path,f_match,pl='qy') # V/C/m
if wake is not None: simul_data.Wqy = wake
if not silent: print('Longitudinal Data Loaded.')
elif anal_pl in {'dx','dy'}:
if not f_match:
if not silent: print('There are no wake files in this folder. I will assume there is no symmetry.')
spos,wake,sbun,bun,bunlen,xd,yd = [],[],[],[],[],[],[]
for sub_fol in ['dpl','dmi']:
ext_path = _jnPth([path,anal_pl+sub_fol])
if not silent: print('Looking for '+anal_pl+sub_fol+' subfolder:')
if not _os.path.isdir(ext_path):
if not silent: print('For non-symmetric structures, there must '
'be subfolders {0:s}dpl {0:s}dmi with the data'.format(anal_pl))
raise Exception('Files not found')
# list all the files that match the pattern
f_in_dir = _sh.ls(ext_path).stdout.decode()
f_match = _re.findall(FNAME_GDFIDL,f_in_dir)
if not f_match:
if not silent: print('No files found for transverse analysis.')
raise Exception('No files found for transverse analysis')
sp, _, sb, bn, bnln, xdi, ydi = _GdfidL_get_longitudinal_info(ext_path,f_match,pl=anal_pl)
spos.append(sp)
bun.append(bn)
sbun.append(sb)
bunlen.append(bnln)
xd.append(xdi)
yd.append(ydi)
if not silent:
print('Loading {0:s} Dipolar Wake file:'.format(
'Horizontal' if anal_pl=='dx' else 'Vertical'))
wk = _GdfidL_get_transversal_info(ext_path,f_match,pl=anal_pl) # V/C
if wk is not None:
wake.append(wk)
else:
if not silent: print('Actually there is something wrong, these wake files should be here.')
raise Exception('Transverse {0:s} dipolar wake files not found'.format(
'Horizontal' if anal_pl=='dx' else 'Vertical'))
delta = xd if anal_pl=='dx' else yd
ndel = yd if anal_pl=='dx' else xd
if not (_np.allclose(spos[0],spos[1],atol=0) and
_np.allclose(sbun[0],sbun[1],atol=0) and
_np.allclose( bun[0], bun[1],atol=0) and
_np.allclose(ndel[0],ndel[1],atol=0)):
if not silent: print('There is a mismatch between the parameters of the '
'simulation in the {0:s}dpl and {0:s}dmi folders.'.format(anal_pl))
raise Exception('Mismatch of the parameters of the simulation in the subfolders.')
simul_data.s = spos[0]
simul_data.bun = bun[0]
simul_data.sbun = sbun[0]
simul_data.bunlen = bunlen[0]
setattr(simul_data,'W'+anal_pl, (wake[0]-wake[1])/(delta[0]-delta[1])) # V/C/m
else:
spos, wake, sbun, bun, bunlen, xd, yd = _GdfidL_get_longitudinal_info(path,f_match,pl=anal_pl)
simul_data.s = spos
simul_data.bun = bun
simul_data.sbun = sbun
simul_data.bunlen = bunlen
if not silent:
print('Loading {0:s} Dipolar Wake file:'.format(
'Horizontal' if anal_pl=='dx' else 'Vertical'))
wake = _GdfidL_get_transversal_info(path,f_match,pl=anal_pl) # V/C
if wake is not None:
delta = xd if anal_pl=='dx' else yd
setattr(simul_data,'W'+anal_pl,wake/delta) # V/C/m
else:
print('Actually there is something wrong, these wake files should be here.')
raise Exception('Transverse {0:s} dipolar wake files not found'.format(
'Horizontal' if anal_pl=='dx' else 'Vertical'))
if not silent: print('Transverse Data Loaded.')
else:
if not silent: print('Plane of analysis {0:s} does not match any of the possible options'.format(anal_pl))
raise Exception('Plane of analysis {0:s} does not match any of the possible options'.format(anal_pl))
if anal_pl_ori:
anal_pl_comp = 'dx' if anal_pl == 'dy' else 'dy'
if not silent: print('There is symmetry. Copying the data from the '+
'{0:s} plane to the {1:s} plane'.format(anal_pl[1].upper(),anal_pl_comp[1].upper()))
setattr(simul_data, 'W'+anal_pl_comp, getattr(simul_data,'W'+anal_pl).copy())
def _GdfidL_load_data(simul_data,path,anal_pl,silent=False):
def _load_dados_info(filename):
dados, info = [], []
with open(filename) as fh:
data = fh.read()
for line in data.splitlines():
if not line.startswith((' #',' %',' $')):
dados.append(line)
else:
info.append(line)
return dados, info
def _get_charge(info):
for line in info:
if line.find('total charge')>=0:
l = line.split(',')[1]
charge = float(_re.findall(r'[-+]?\d+\.?\d+[eE]?[-+]?\d+',l)[0])
break
return charge
def _get_integration_path(info):
for line in info:
if line.find('subtitle=')>=0:
x,y = (float(val) for val in _re.findall(r'[-+]?\d+\.?\d+[eE]?[-+]?\d+',line))
break
return x, y
def _get_longitudinal_info(path,filelist,pl='ll'):
if not silent: print('Loading longitudinal Wake file:')
fn = [f for f in filelist if f.find('Wq_AT_XY')>=0]
if not fn:
if not silent: print('No longitudinal wake file found. Exactly one is required.')
raise Exception('No longitudinal wake file found. Exactly one is required.')
if len(fn)>1:
if not silent: print('More than one longitudinal wake file found. Only one is allowed.')
raise Exception('More than one longitudinal wake file found. Only one is allowed.')
dados, info = _load_dados_info(_jnPth([path,fn[0]]))
charge = _get_charge(info)
xd, yd = _get_integration_path(info)
spos,wake = _np.loadtxt(dados,unpack=True) # dados is a list of strings
if not silent: print('Charge of the driving bunch: {0:5.3g} pC'.format(charge*1e12))
if pl == 'll' and (abs(xd) > 1e-10 or abs(yd) > 1e-10) and not silent:
print('Driving particle not in the origin. Are you sure this is what you want?')
elif pl !='ll' and abs(xd) < 1e-10 and abs(yd) < 1e-10 and not silent:
print('The driving bunch is too close to origin. Are you sure this is what you want?')
a = _np.argmin(_np.diff(spos)) + 1
sbun = spos[a:]
bun = wake[a:]*charge/_np.trapz(wake[a:],x=sbun) # C
wake = -wake[:a]/charge # V/C # minus sign because of convention
spos = spos[:a] # m
bunlen = -sbun[0]/6 # gdfidl uses a bunch with 6-sigma
if not silent:
print('Bunch length of the driving bunch: {0:7.4g} mm'.format(bunlen*1e3))
return spos, wake, sbun, bun, bunlen, xd, yd
def _get_transversal_info(path,filelist,pl='qx'):
stri = 'W{0:s}_AT_XY'.format(pl[1].upper())
fn = [f for f in filelist if f.find(stri)>=0]
if not fn:
if not silent: print('No W{0:s} wake file found. Skipping to next'.format(pl))
return None
if not silent: print('{0:2d} W{1:s} wake file found: {2:s}'.format(len(fn),pl,', '.join(fn)))
dados, info = _load_dados_info(_jnPth([path,fn[0]]))
charge = _get_charge(info)
if pl[1] == 'x':
delta1,_ = _get_integration_path(info)
else:
_,delta1 = _get_integration_path(info)
_, wake1 = _np.loadtxt(dados,unpack=True)
print('Integration path at {0:s} = {1:8.4g} um '.format(pl[1],delta1*1e6),end='')
wake = wake1/delta1 / charge # V/C/m
if len(fn) > 1:
dados, info = _load_dados_info(_jnPth([path,fn[1]]))
if pl[1] == 'x':
delta2,_ = _get_integration_path(info)
else:
_,delta2 = _get_integration_path(info)
_, wake2 = _np.loadtxt(dados,unpack=True)
print('and {0:8.4g} um'.format(delta2*1e6))
if pl[0] == 'd':
wake = (wake1/delta1 - wake2/delta2)/(1/delta1-1/delta2) / charge # V/C
else:
wake = (wake1 - wake2)/(delta1-delta2) / charge # V/C/m
else:
print()
return wake
# list all the files that match the name pattern for wakefields
f_in_dir = _sh.ls(path).stdout.decode()
f_match = _re.findall(FNAME_GDFIDL,f_in_dir)
elec_symm = False
anal_pl_ori = None
if anal_pl == 'db':
anal_pl_ori = 'db'
if not f_match:
anal_pl = 'dx' if _os.path.isdir('dxdpl') else 'dy'
else:
anal_pl = 'dx' if [f for f in f_match if f.find('WX_AT_XY')>=0] else 'dy'
if not silent: print('There is symmetry y=x, calculation performed in the '+anal_pl[1].upper()+' plane.')
if anal_pl in {'ll'}:
if not f_match:
if not silent: print('No files found for longitudinal analysis.')
raise Exception('No files found for longitudinal analysis')
#Load longitudinal Wake
spos, wake, sbun, bun, bunlen, xd, yd = _get_longitudinal_info(path,f_match,pl='ll')
simul_data.Wll = wake
simul_data.s = spos
simul_data.bun = bun
simul_data.sbun = sbun
simul_data.bunlen = bunlen
# And quadrupolar Wakes, if existent:
if not silent: print('Loading Horizontal Quadrupolar Wake file:')
wake = _get_transversal_info(path,f_match,pl='qx') # V/C/m
if wake is not None: simul_data.Wqx = wake
if not silent: print('Loading Vertical Quadrupolar Wake file:')
wake = _get_transversal_info(path,f_match,pl='qy') # V/C/m
if wake is not None: simul_data.Wqy = wake
if not silent: print('Longitudinal Data Loaded.')
elif anal_pl in {'dx','dy'}:
if not f_match:
if not silent: print('There are no wake files in this folder.')
elec_fol = _jnPth([path,'elec'])
if _os.path.isdir(elec_fol):
if not silent: print(' I found a folder named "elec". I will assume the simulation has this symmetry.')
f_in_dir = _sh.ls(elec_fol).stdout.decode()
f_match = _re.findall(FNAME_GDFIDL,f_in_dir)
elec_symm = True;
if not f_match:
if not silent: print(' I will assume there is no symmetry.')
spos,wake,sbun,bun,bunlen,xd,yd = [],[],[],[],[],[],[]
for sub_fol in ['dpl','dmi']:
ext_path = _jnPth([path,anal_pl+sub_fol])
if not silent: print('Looking for '+anal_pl+sub_fol+' subfolder:')
if not _os.path.isdir(ext_path):
if not silent: print('For non-symmetric structures, there must '
'be subfolders {0:s}dpl {0:s}dmi with the data'.format(anal_pl))
raise Exception('Files not found')
# list all the files that match the pattern
f_in_dir = _sh.ls(ext_path).stdout.decode()
f_match = _re.findall(FNAME_GDFIDL,f_in_dir)
if not f_match:
if not silent: print('No files found for transverse analysis.')
raise Exception('No files found for transverse analysis')
sp, _, sb, bn, bnln, xdi, ydi = _get_longitudinal_info(ext_path,f_match,pl=anal_pl)
spos.append(sp)
bun.append(bn)
sbun.append(sb)
bunlen.append(bnln)
xd.append(xdi)
yd.append(ydi)
if not silent:
print('Loading {0:s} Dipolar Wake file:'.format(
'Horizontal' if anal_pl=='dx' else 'Vertical'))
wk = _get_transversal_info(ext_path,f_match,pl=anal_pl) # V/C
if wk is not None:
wake.append(wk)
else:
if not silent: print('Actually there is something wrong, these wake files should be here.')
raise Exception('Transverse {0:s} dipolar wake files not found'.format(
'Horizontal' if anal_pl=='dx' else 'Vertical'))
#If the simulation is not ready yet the lengths may differ. This line
# is used to truncate the longer wake to the length of the shorter:
l1 = min( len(spos[0]), len(spos[1]) )
delta = xd if anal_pl=='dx' else yd
ndel = yd if anal_pl=='dx' else xd
if not (_np.allclose(spos[0][:l1],spos[1][:l1],atol=0) and
_np.allclose(sbun[0],sbun[1],atol=0) and
_np.allclose( bun[0], bun[1],atol=0) and
_np.allclose(ndel[0],ndel[1],atol=0)):
if not silent: print('There is a mismatch between the parameters of the '
'simulation in the {0:s}dpl and {0:s}dmi folders.'.format(anal_pl))
raise Exception('Mismatch of the parameters of the simulation in the subfolders.')
simul_data.s = spos[0][:l1]
simul_data.bun = bun[0]
simul_data.sbun = sbun[0]
simul_data.bunlen = bunlen[0]
setattr(simul_data,'W'+anal_pl, (wake[0][:l1]-wake[1][:l1])/(delta[0]-delta[1])) # V/C/m
else:
if elec_symm: path = elec_fol
spos, wake, sbun, bun, bunlen, xd, yd = _get_longitudinal_info(path,f_match,pl=anal_pl)
simul_data.s = spos
simul_data.bun = bun
simul_data.sbun = sbun
simul_data.bunlen = bunlen
if not silent:
print('Loading {0:s} Dipolar Wake file:'.format(
'Horizontal' if anal_pl=='dx' else 'Vertical'))
wake = _get_transversal_info(path,f_match,pl=anal_pl) # V/C
if wake is not None:
delta = xd if anal_pl=='dx' else yd
delta *= 2 if elec_symm else 1
setattr(simul_data,'W'+anal_pl,wake/delta) # V/C/m
else:
print('Actually there is something wrong, these wake files should be here.')
raise Exception('Transverse {0:s} dipolar wake files not found'.format(
'Horizontal' if anal_pl=='dx' else 'Vertical'))
if not silent: print('Transverse Data Loaded.')
else:
if not silent: print('Plane of analysis {0:s} does not match any of the possible options'.format(anal_pl))
raise Exception('Plane of analysis {0:s} does not match any of the possible options'.format(anal_pl))
if anal_pl_ori:
anal_pl_comp = 'dx' if anal_pl == 'dy' else 'dy'
if not silent: print('There is symmetry. Copying the data from the '+
'{0:s} plane to the {1:s} plane'.format(anal_pl[1].upper(),anal_pl_comp[1].upper()))
setattr(simul_data, 'W'+anal_pl_comp, getattr(simul_data,'W'+anal_pl).copy())
def _ECHOz1_load_data(simul_data,path,anal_pl,silent=False):
if anal_pl=='ll':
if not silent: print('Loading longitudinal Wake file:',end='')
fname = _jnPth([path,FNAME_ECHOZ1])
if _os.path.isfile(fname):
if not silent: print('Data found.')
loadres = _np.loadtxt(fname, skiprows=0)
else:
if not silent: print('Not found.')
raise Exception('Longitudinal wake file not found.')
else:
if not silent: print('ECHOz1 only calculates longitudinal wake.')
raise Exception('ECHOz1 only calculates longitudinal wake.')
simul_data.s = loadres[:,0]/100 # Rescaling cm to m
simul_data.Wll = -loadres[:,1] * 1e12 # V/C the minus sign is due to convention
# loading driving bunch info
if not silent: print('Loading bunch length from wake.dat')
sbun = simul_data.s.copy()
ds = sbun[1]-sbun[0]
bunlen = abs(sbun[0]-ds/2) / 5
a = _np.argmin(_np.abs(sbun + sbun[0])) + 1
sbun = sbun[:a]
simul_data.bunlen = bunlen
simul_data.sbun = sbun
simul_data.bun = _np.exp(-sbun**2/(2*bunlen**2))/(_np.sqrt(2*_np.pi)*bunlen)
if not silent:
print('Bunch length of the driving bunch: {0:7.3g} mm'.format(bunlen*1e3))
print('Data Loaded.')
def _ECHOz2_load_data(simul_data,path,anal_pl,silent=False):
anal_pl_ori = None
if anal_pl == 'db':
anal_pl_ori = 'db'
anal_pl = 'dy'
if not silent: print('Even though there is symmetry, I am loading data to the Y plane.')
if anal_pl=='ll':
if not silent: print('Loading longitudinal Wake file:',end='')
fname = _jnPth([path,'wakeL.dat'])
if _os.path.isfile(fname):
if not silent: print('Data found.')
spos,wl = _np.loadtxt(fname, skiprows=0,usecols=(0,1),unpack=True)
else:
if not silent: print('Not found.')
raise Exception('Longitudinal wake file not found.')
simul_data.s = spos/100 # Rescaling cm to m
simul_data.Wll = -wl * 1e12 # V/C the minus sign is due to convention
elif anal_pl in {'dx','dy'}:
fname = _jnPth([path,'wakeL.dat'])
if _os.path.isfile(fname):
if not silent: print('Calculating Transverse wake from longitudinal wake file:',end='')
if not silent: print('Data found.')
spos,wl = _np.loadtxt(fname, skiprows=0,usecols=(0,1),unpack=True)
simul_data.s = spos/100 # Rescaling cm to m
wt = -_scy_int.cumtrapz(-wl,x=spos/100,initial=0) # one minus sign due to convention and the other due to Panofsky-Wenzel
setattr(simul_data, 'W'+anal_pl, wt * 1e12) # V/C/m
else:
if not silent: print('File not found.\nLoading transverse wake from transverse wake file.:',end='')
fname = _jnPth([path,'wakeT.dat'])
if _os.path.isfile(fname):
if not silent: print('Data found.\nDepending on the ECHOz2 program version this may lead to inaccurate results.')
spos,wt = _np.loadtxt(fname, skiprows=0,usecols=(0,1),unpack=True)
else:
if not silent: print('Not found.')
raise Exception('Transverse wake file not found.')
simul_data.s = spos/100 # Rescaling cm to m
# there is an error in the integration of echoz2. It is necessary to subtract
# the first value to correct an offset
# wt = -_scy_int.cumtrapz(-wl,x=spos/100,initial=0)
setattr(simul_data, 'W'+anal_pl, (wt-wt[0]) * 1e12) # V/C/m the minus sign is due to convention
else:
if not silent: print('Plane of analysis {0:s} does not match any of the possible options'.format(anal_pl))
raise Exception('Plane of analysis {0:s} does not match any of the possible options'.format(anal_pl))
# loading driving bunch info
if not silent: print('Loading bunch length from wake file')
sbun = simul_data.s.copy()
ds = sbun[1]-sbun[0]
bunlen = abs(sbun[0] -ds/2) / 5
a = _np.argmin(_np.abs(sbun + sbun[0])) + 1
sbun = sbun[:a]
simul_data.bunlen = bunlen
simul_data.sbun = sbun
simul_data.bun = _np.exp(-sbun**2/(2*bunlen**2))/(_np.sqrt(2*_np.pi)*bunlen)
if not silent:
print('Bunch length of the driving bunch: {0:7.3g} mm'.format(simul_data.bunlen*1e3))
print('Data Loaded.')
if anal_pl_ori:
anal_pl_comp = 'dx' if anal_pl == 'dy' else 'dy'
if not silent: print('There is symmetry. Copying the data from the '+
'{0:s} plane to the {1:s} plane'.format(anal_pl[1].upper(),anal_pl_comp[1].upper()))
setattr(simul_data, 'W'+anal_pl_comp, getattr(simul_data,'W'+anal_pl).copy())
def _ECHO_rect_load_data(simul_data,code,path,anal_pl,silent):
def _load_dados(fname,mode, bc, code):
if code == 'echozr':
len_unit, charge_unit, header = 1e-2, 1e-12, 3 # cm to m, pC to C
with open(fname) as f:
f.readline()
a = f.readline()
mstep, offset, wid, bunlen = _np.fromstring(a[1:],sep='\t')
offset = int(offset)
arbitrary_factor = 2 # I don't know why I have to divide the echozr data by 2;
y0 = y = mstep*offset / 100
elif code == 'echo2d':
len_unit, charge_unit, header = 1, 1, 6
with open(fname) as f:
f.readline()
mstep, offset = _np.fromstring(f.readline(),sep='\t')
f.readline()
wid, bunlen = _np.fromstring(f.readline(),sep='\t')
offset = int(offset)
arbitrary_factor = 1 # But I don't have to do this for the echo2d data.
y0 = y = mstep*offset
offset = 0 # has only one column of wake
spos, Wm = _np.loadtxt(fname,skiprows=header,usecols=(0,1+offset),unpack=True)
mstep *= len_unit
wid *= len_unit
bunlen*= len_unit
spos *= len_unit
Wm *= -len_unit/charge_unit / arbitrary_factor # minus sign is due to convention
Kxm = _np.pi/wid*mode
if bc == 'elec': Wm /= _np.sinh(Kxm*y0)*_np.sinh(Kxm*y)
else: Wm /= _np.cosh(Kxm*y0)*_np.cosh(Kxm*y)
return spos, Wm, mstep, wid, bunlen
if anal_pl == 'db':
if not silent: print('Problem: rectangular geometries do not have the symmetry assumed by this analysis.')
raise Exception('Problem: rectangular geometries do not have the symmetry assumed by this analysis.')
if anal_pl == 'll': bc = 'magn'
elif anal_pl in {'dx','dy'}: bc = 'elec'
if not silent: print('Looking for data files in subfolder {0:s}.'.format(bc))
pname = _jnPth([path,bc])
if not _os.path.isdir(pname):
pname = path
if code == 'echozr':
if not silent:
print('Subfolder not found. It would be better to'+
' create the subfolder and put the files there...')
print('Looking for files in the current folder:')
elif code == 'echo2d':
if not silent: print('Files not found. ')
raise Exception('Files not found.')
f_in_dir = _sh.ls(pname).stdout.decode()
f_match = sorted(_re.findall(FNAME_ECHOZR2D,f_in_dir))
if not f_match:
if not silent: print('Files not found.')
raise Exception('Files not found.')
if not silent:
print('Files found.\n I am assuming the simulation was performed '+
'with {0:s} boundary condition.'.format('electric' if bc == 'elec' else 'magnetic'))
print('Modes found: ' + ', '.join([m for _,m in f_match]))
print('Loading data from files')
spos, W, mode, mesh_size, width, bunlen = [], [], [], [], [], []
for fn, m in f_match:
if int(m) == 0: continue
s, Wm, ms, wid, bl = _load_dados(_jnPth([pname,fn]),int(m),bc,code)
mode.append(int(m))
spos.append(s)
W.append(Wm)
mesh_size.append(ms)
width.append(wid)
bunlen.append(bl)
cond = False
for i in range(1,len(mode)):
cond |= len(spos[i]) != len(spos[0])
cond |= not _np.isclose(mesh_size[i], mesh_size[0], rtol=1e-5, atol=0)
cond |= not _np.isclose(width[i], width[0], rtol=0, atol=1e-7)
cond |= not _np.isclose(bunlen[i], bunlen[0], rtol=1e-5, atol=0)
if cond:
message = 'Parameters of file {0:s} differ from {1:s}.'.format(f_match[i][0],f_match[0][0])
if not silent: print(message)
raise Exception(message)
simul_data.s = spos[0]
simul_data.bunlen = bunlen[0]
a = _np.argmin(_np.abs(spos[0] + spos[0][0])) + 1 # I want the bunch to be symmetric
sbun = spos[0][:a]
simul_data.sbun = sbun
simul_data.bun =_np.exp(-sbun**2/(2*bunlen[0]**2))/(_np.sqrt(2*_np.pi)*bunlen[0])
if not silent:
print('Bunch length of the driving bunch: {0:7.4g} mm'.format(simul_data.bunlen*1e3))
print('Width of the simulated geometry: {0:7.4g} mm'.format(width[0]*1e3))
print('Mesh step used in the simulation: {0:7.4g} um'.format(mesh_size[0]*1e6))
print('All Data Loaded.')
if anal_pl=='ll':
if not silent: print('Calculating longitudinal Wake from data:')
Wll, frac = None, 1
for i in range(len(mode)):
if mode[i] == 1:
Wll = W[i].copy()
elif mode[i] % 2:
Wll += W[i] #only odd terms
frac = _np.max(_np.abs(W[i]/Wll))
if Wll is None:
if not silent: print('There is no odd mode to calculate the Longitudinal Wake.')
else:
if not silent: print('Maximum influence of last mode in the final result is: {0:5.2f}%'.format(frac*100))
Wll *= 2/width[0]
simul_data.Wll = Wll
if not silent: print('Calculating Quadrupolar Wake from data:')
Wq, frac = None, 1
for i in range(len(mode)):
Kxm = _np.pi/width[0]*mode[i]
if mode[i] == 1:
Wq = W[i].copy() * Kxm**2
elif mode[i] % 2:
Wq += W[i] * Kxm**2 #only odd terms
frac = _np.max(_np.abs(W[i]*Kxm**2/Wq))
if Wq is None:
if not silent: print('There is no odd mode to calculate the Quadrupolar Wake.')
else:
if not silent: print('Maximum influence of last mode in the final result is: {0:5.2f}%'.format(frac*100))
Wq *= 2/width[0]
Wq = -_scy_int.cumtrapz(Wq,x=spos[0],initial=0) # minus sign is due to Panofsky-Wenzel
simul_data.Wqy = Wq
simul_data.Wqx = -Wq
if not silent: print('Calculating Dipolar Horizontal Wake from data:')
Wdx, frac = None, 1
for i in range(len(mode)):
Kxm = _np.pi/width[0]*mode[i]
if mode[i] == 2:
Wdx = W[i].copy() * Kxm**2
elif not mode[i] % 2:
Wdx += W[i] * Kxm**2 #only even terms
frac = _np.max(_np.abs(W[i]*Kxm**2/Wdx))
if Wdx is None:
if not silent: print('There is no even mode to calculate the Dipolar Horizontal Wake.')
else:
if not silent: print('Maximum influence of last mode in the final result is: {0:5.2f}%'.format(frac*100))
Wdx *= 2/width[0]
Wdx = -_scy_int.cumtrapz(Wdx,x=spos[0],initial=0) # minus sign is due to Panofsky-Wenzel
simul_data.Wdx = Wdx
elif anal_pl in {'dx','dy'}:
pl = 'Vertical' if anal_pl == 'dy' else 'Horizontal'
if not silent: print('Calculating Dipolar {0:s} Wake from data:'.format(pl))
Wd, frac = None, 1
for i in range(len(mode)):
Kxm = _np.pi/width[0]*mode[i]
if mode[i] == 1:
Wd = W[i].copy() * Kxm**2
elif mode[i] % 2:
Wd += W[i] * Kxm**2 #only odd terms
frac = _np.max(_np.abs(W[i]*Kxm**2/Wd))
if Wd is None:
if not silent: print('There is no odd mode to calculate the Dipolar {0:s} Wake.'.format(pl))
else:
if not silent: print('Maximum influence of last mode in the final result is: {0:5.2f}%'.format(frac*100))
Wd *= 2/width[0]
Wd = -_scy_int.cumtrapz(Wd,x=spos[0],initial=0) # minus sign is due to Panofsky-Wenzel
setattr(simul_data,'W'+anal_pl,Wd)
else:
if not silent: print('Plane of analysis {0:s} does not match any of the possible options'.format(anal_pl))
raise Exception('Plane of analysis {0:s} does not match any of the possible options'.format(anal_pl))
def _ECHOzR_load_data(simul_data,path,anal_pl,silent=False):
_ECHO_rect_load_data(simul_data,'echozr',path,anal_pl,silent)
def _ECHO2D_load_data(simul_data,path,anal_pl,silent=False):
if not silent: print('Trying to find out the geometry type: ',end='')
if (_os.path.isdir(_jnPth([path,'magn'])) or
_os.path.isdir(_jnPth([path,'elec']))):
geo_type = 'rectangular'
elif (_os.path.isfile(_jnPth([path,'wakeL_00.txt'])) or
_os.path.isfile(_jnPth([path,'wakeL_01.txt']))):
geo_type = 'round'
else:
if not silent: print('not ok.\n Could not find out the geometry type.')
raise Exception('Could not find out the geometry type.')
if not silent: print(geo_type)
if geo_type == 'rectangular':
_ECHO_rect_load_data(simul_data,'echo2d',path,anal_pl,silent)
else:
anal_pl_ori = None
if anal_pl == 'db':
anal_pl_ori = 'db'
anal_pl = 'dy'
if not silent: print('Even though there is symmetry, I am loading data to the Y plane.')
if anal_pl=='ll':
if not silent: print('Loading longitudinal Wake file:',end='')
fname = _jnPth([path,'wakeL_00.txt'])
if _os.path.isfile(fname):
if not silent: print('Data found.')
with open(fname) as f:
f.readline()
mstep, offset = _np.fromstring(f.readline(),sep='\t')
f.readline()
_, bunlen = _np.fromstring(f.readline(),sep='\t')
spos, Wm = _np.loadtxt(fname,skiprows=6,unpack=True)
simul_data.s = spos
simul_data.Wll = -Wm # V/C the minus sign is due to convention
else:
if not silent: print('Not found.')
raise Exception('Longitudinal wake file not found.')
elif anal_pl in {'dx','dy'}:
if not silent: print('Loading Transverse Wake file:',end='')
fname = _jnPth([path,'wakeL_01.txt'])
if _os.path.isfile(fname):
if not silent: print('Data found.')
with open(fname) as f:
f.readline()
mstep, offset = _np.fromstring(f.readline(),sep='\t')
f.readline()
_, bunlen = _np.fromstring(f.readline(),sep='\t')
y0 = mstep*(offset+0.5) # transverse wakes are calculated in the middle of the mesh
spos, Wm = _np.loadtxt(fname,skiprows=6,unpack=True) # m and V/C/m^2
simul_data.s = spos
Wdm = -_scy_int.cumtrapz(-Wm/(y0*y0),x=spos,initial=0) # V/C/m the minus sign is due to convention
setattr(simul_data, 'W'+anal_pl, Wdm)
else:
if not silent: print('File not found.')
raise Exception('Transverse wake file not found.')
else:
if not silent: print('Plane of analysis {0:s} does not match any of the possible options'.format(anal_pl))
raise Exception('Plane of analysis {0:s} does not match any of the possible options'.format(anal_pl))
if anal_pl_ori:
anal_pl_comp = 'dx' if anal_pl == 'dy' else 'dy'
if not silent: print('There is symmetry. Copying the data from the '+
'{0:s} plane to the {1:s} plane'.format(anal_pl[1].upper(),anal_pl_comp[1].upper()))
setattr(simul_data, 'W'+anal_pl_comp, getattr(simul_data,'W'+anal_pl).copy())
a = _np.argmin(_np.abs(spos + spos[0])) + 1
sbun = spos[:a]
simul_data.bunlen = bunlen
simul_data.sbun = sbun
simul_data.bun = _np.exp(-sbun**2/(2*bunlen**2))/(_np.sqrt(2*_np.pi)*bunlen)
if not silent:
print('Bunch length of the driving bunch: {0:7.3g} mm'.format(simul_data.bunlen*1e3))
print('Mesh size used in the simulation: {0:7.4g} um'.format(mstep*1e6))
print('Data Loaded.')
CODES = {'echoz1': _ECHOz1_load_data,
'echoz2': _ECHOz2_load_data,
'echo2d': _ECHO2D_load_data,
'echozr': _ECHOzR_load_data,
'gdfidl': _GdfidL_load_data,
'ace3p' : _ACE3P_load_data,
'cst' : _CST_load_data
}
def load_raw_data(simul_data=None, code=None, path=None, anal_pl=None, silent=False):
if not simul_data: simul_data = EMSimulData()
if path is None: path = _os.path.abspath('.')
if not silent: print('#'*60 + '\nLoading Simulation Data')
#Split the path to try to guess other parameters:
path_split = set(path.lower().split(_os.path.sep))
#First try to guess the code used in simulation, if not supplied:
if code is None:
if not silent: print('Simulation Code not supplied, trying to guess from path: ', end='')
code_guess = list(CODES.keys() & path_split)
if code_guess: code = code_guess[0]
else:
if not silent: print('could not be guessed.')
if not silent: print('Trying to guess from files in folder: ', end='')
f_mat = None
f_in_dir = _sh.ls(path).stdout.decode()
if len(_re.findall(FNAME_GDFIDL,f_in_dir)): code = 'gdfidl'
elif len(_re.findall(FNAME_ECHOZ1,f_in_dir)): code = 'echoz1'
elif len(_re.findall(FNAME_ECHOZ2,f_in_dir)): code = 'echoz2'
elif len(_re.findall(FNAME_ECHOZR2D,f_in_dir)):
fol = path
f_mat = _re.findall(FNAME_ECHOZR2D,f_in_dir)
elif _os.path.isdir(_jnPth([path,'elec'])):
fol = _jnPth([path,'elec'])
f_in_fol = _sh.ls(fol).stdout.decode()
f_mat = _re.findall(FNAME_ECHOZR2D,f_in_fol)
elif _os.path.isdir(_jnPth([path,'magn'])):
fol = _jnPth([path,'magn'])
f_in_fol = _sh.ls(fol).stdout.decode()
f_mat = _re.findall(FNAME_ECHOZR2D,f_in_fol)
else: raise Exception('Simulation Code was not supplied and could not be guessed.')
if f_mat is not None:
if _os.path.isfile(_jnPth([fol,f_mat[0][0]])):
with open(_jnPth([fol,f_mat[0][0]])) as f:
code = 'echozr' if f.readline().find('[cm]')> 0 else 'echo2d'
else: raise Exception('Simulation Code was not supplied and could not be guessed.')
if not silent: print(code)
simul_data.code = code
# Now try to guess the plane of the analysis:
if anal_pl is None:
if not silent: print('Plane of Analysis not supplied, trying to guess from path: ', end='')
anal_pl_guess = list(ANALYSIS_TYPES & path_split)
if anal_pl_guess: anal_pl = anal_pl_guess[0]
else:
if not silent: print('could not be guessed.')
if not silent: print('Trying to guess from files in folder and code: ', end='')
if code == 'echoz1': anal_pl = 'll'
elif code == 'echoz2': anal_pl = 'dy' if _os.path.isfile(_jnPth([path,'wakeT.dat'])) else 'll'
elif code == 'gdfidl':
f_in_dir = _sh.ls(path).stdout.decode()
f_mat = _re.findall(r"[\w-]+W([YXq]{2})_AT_XY.[0-9]{4}",f_in_dir)
if len(f_mat) > 0:
# f_mat = _re.findall(r"[\w-]+W([YX]{1})_AT_XY.[0-9]{4}",f_in_dir)
# countx = [x for x in f_mat if x=='X']
# county = [y for y in f_mat if y=='Y']
# anal_pl = 'dy' if len(county) >= len(county) else 'dx'
anal_pl = 'd'+f_mat[0][0].lower()
else:
anal_pl = 'll'
elif code == 'echozr':
if _os.path.isdir(_jnPth([path,'magn'])): anal_pl = 'll'
elif _os.path.isdir(_jnPth([path,'elec'])): anal_pl = 'dy'
elif _os.path.isfile(_jnPth([path,'wakeL_01.txt'])):
w = _np.loadtxt(_jnPth([path,'wakeL_01.txt']),skiprows=3,usecols=(1,),unpack=True)
if _np.all(w==0): anal_pl = 'dy'
else: anal_pl = 'll'
else: raise Exception('Plane of analysis was not supplied and could not be guessed.')
elif code == 'echo2d':
if _os.path.isdir(_jnPth([path,'magn'])): anal_pl = 'll'
elif _os.path.isdir(_jnPth([path,'elec'])): anal_pl = 'dy'
elif _os.path.isfile(_jnPth([path,'wakeL_00.txt'])): anal_pl = 'll'
else: anal_pl = 'dy'
else: raise Exception('Plane of analysis was not supplied and could not be guessed.')
if not silent: print(anal_pl)
CODES[code](simul_data,silent=silent,path=path,anal_pl=anal_pl) # changes in simul_data are made implicitly
if not silent: print('#'*60+'\n')
return simul_data
def calc_impedance(simul_data, use_win='phase', pl=None, cutoff=2, s_min=None, s_max=None, silent=False):
def better_fft_length(n,max_factor=1000):
for p in range(n):
n2, i = n-p, 2
while (i * i <= n2 and i < max_factor):
if n2 % i: i += 1
else: n2 //= i
if n2 < max_factor: return p, n-p
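# e.g. better_fft_length(1009) gives (1, 1008): one point is dropped so that
# the remaining length factors into small primes (below max_factor), which
# keeps numpy's FFT fast.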
def _get_impedance(spos,wake,sigt,cutoff):
dt = (spos[-1]-spos[0]) / (spos.shape[0]-1) / c # sampling time step [s]
VHat = _np.fft.fft(wake, wake.shape[0]) * dt # fft == \int exp(-i*2pi*f*t/n) G(t) dt
freq = _np.fft.fftfreq(wake.shape[0], d=dt)
VHat = _np.fft.fftshift(VHat) # shift the negative frequencies
freq = _np.fft.fftshift(freq) # to the center of the spectrum
# Longitudinal position shift to match center of the bunch with zero z:
w = 2*_np.pi*freq
VHat *= _np.exp(-1j*w*(spos[0])/c)
# find the maximum useable frequency
wmax = cutoff/sigt
indcs = _np.abs(w) <= wmax
# Deconvolve the Transform with a gaussian bunch:
Jwlist = _np.exp(-(w*sigt)**2/2)
Z = VHat[indcs]/Jwlist[indcs]
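# (dividing by Jwlist removes the Gaussian bunch spectrum exp(-(w*sigt)**2/2),
# so Z approximates the point-charge impedance up to the chosen cutoff)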
return freq[indcs], Z
if not silent: print('#'*60 + '\n' + 'Calculating Impedances')
if pl is None: planes = PLANES
else: planes = [pl]
# Extracts Needed Variables
sigt = simul_data.bunlen / c # bunch time-length
spos = simul_data.s.copy()
if s_min is None: s_min = spos[0]
if s_max is None: s_max = spos[-1]
inds = _np.logical_and(spos >= s_min, spos <= s_max)
spos = spos[inds]
p, n = better_fft_length(spos.shape[0]) # numpy's fft algorithm is slow for large primes
spos = spos[:n]
if not silent and p>0:
print('Last {0:d} point{1:s} of wake {2:s} '.format(
p, *(('s','were') if p > 1 else ('','was'))) +
'not considered to gain performance in FFT.')
if use_win is True:
if not silent: print('Using Half-Hanning Window')
# Half Hanning window to zero the end of the signal
window = _np.hanning(2*spos.shape[0])[spos.shape[0]:]
elif isinstance(use_win,str) and use_win.lower().startswith('phase'):
if not silent: print('Using Half-Hanning Window to correct the phases')
# Half Hanning window to smooth the final of the signal
window = _np.hanning(2*spos.shape[0])[spos.shape[0]:]
else:
if not silent: print('Not using Window')
window = _np.ones(spos.shape[0])
if not silent: print('Cutoff frequency w = {0:d}/sigmat'.format(cutoff))
for pl in planes:
if not silent: print('Performing FFT on W{0:s}: '.format(pl),end='')
Wpl = getattr(simul_data,'W'+pl).copy()
if Wpl is None or _np.all(Wpl == 0):
if not silent: print('No Data found.')
continue
if not silent: print('Data found. ',end='')
Wpl = Wpl[inds]
Wpl = Wpl[:n]
simul_data.freq, Zpl = _get_impedance(spos,Wpl*window,sigt,cutoff)
if isinstance(use_win,str) and use_win.lower().startswith('phase'):
_, Zpl2 = _get_impedance(spos,Wpl,sigt,cutoff)
Zpl = _np.abs(Zpl2)*_np.exp(1j*_np.angle(Zpl))
if pl =='ll':
# I have to take the conjugate of the fft because:
#fftt == \int exp(-i*2pi*f*t/n) G(t) dt
#while impedance, according to Chao and Ng, is given by:
#Z == \int exp(i*2pi*f*t/n) G(t) dt
simul_data.Zll = Zpl.conj()
else:
#the Transverse impedance, according to Chao and Ng, is given by:
#Z == i\int exp(i*2pi*f*t/n) G(t) dt
setattr(simul_data, 'Z'+pl, 1j*Zpl.conj())
if not silent: print('Impedance Calculated.')
if not silent: print('#'*60 + '\n')
if bool_pyaccel:
def calc_impedance_naff(simul_data, pl='ll', s_min = None,s_max = None, win = 1, nr_ff = 20):
if pl not in PLANES:
raise Exception('Value of variable pl not accepted. Must be one of these: '+', '.join(PLANES))
# Extracts Needed Variables
sigt = simul_data.bunlen / c # bunch time-length
spos = simul_data.s.copy()
W = getattr(simul_data,'W' + pl).copy()
sizeW = len(W)
if W is None or not len(W) or _np.all(W == 0):
raise Exception('No Data found.')
if s_min is None: s_min = spos[0]
if s_max is None: s_max = spos[-1]
inds = _np.logical_and(spos >= s_min, spos <= s_max)
spos = spos[inds]
W = W[inds]
dt = (spos[1]-spos[0])/c
leng = len(W) - (len(W)-1)%6
spos = spos[-leng:]
W = W[-leng:]
if 0.49 < win < 0.51:
W *= _np.hanning(2*spos.shape[0])[spos.shape[0]:]
tu,a = _naff.naff_general(W,use_win=0, is_real=False, nr_ff=nr_ff)
elif isinstance(win,int):
tu,a = _naff.naff_general(W,use_win=win, is_real=False, nr_ff=nr_ff)
else:
raise Exception('Win must be 1/2 for half-hanning window or an integer for other windows(0 --> no window).')
freq = tu/dt
w = 2*_np.pi*freq
# Longitudinal position shift to match center of the bunch with zero z:
a *= _np.exp(-1j*w*(spos[0])/c)
# Reconstruct the signal
S = _np.zeros(leng,dtype=complex)
for wi,ai in zip(w,a):
S += ai*_np.exp(1j*wi*spos/c)
S = S.real
# Deconvolve the Transform with a gaussian bunch:
a /= _np.exp(-(w*sigt)**2/2)
# Must multiply by the vector length due to difference in the meaning of the
# amplitude in the NAFF transform and the Fourier transform
Z = a*dt*sizeW
if pl =='ll':
# I have to take the conjugate of the fft because:
#fftt == \int exp(-i*2pi*f*t/n) G(t) dt
#while impedance, according to Chao and Ng, is given by:
#Z == \int exp(i*2pi*f*t/n) G(t) dt
Z = Z.conj()
else:
#the Transverse impedance, according to Chao and Ng, is given by:
#Z == i\int exp(i*2pi*f*t/n) G(t) dt
Z = 1j*Z.conj()
return freq, Z, leng, S
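# Hypothetical usage (only defined when pyaccel is importable; `sim` is an
# EMSimulData instance with the wakes already loaded):
# freq, Z, npts, Wrec = calc_impedance_naff(sim, pl='ll', win=1, nr_ff=20)
# where Wrec is the wake rebuilt from the nr_ff strongest NAFF components and
# can be compared with the original wake as a sanity check.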
def plot_wakes(simul_data,save_figs=False,pth2sv=None,show=False,pls=None):
sbun = simul_data.sbun
sigs = simul_data.bunlen
spos = simul_data.s
if pls is None: pls = PLANES
for pl in pls:
wake = getattr(simul_data,'W'+pl)*1e-12 # V/C -> V/pC
if wake is None or _np.all(wake==0): continue
max_wake = wake[_np.abs(wake).argmax()]
bunchshape = simul_data.bun * (max_wake/simul_data.bun.max())
f,axs = _plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(14,6))
ax = axs[0]
b = ax.get_position()
b.x0, b.x1 = 0.05, 0.45
ax.set_position(b)
ax.plot(sbun*1000,bunchshape,'b',linewidth=2,label='Bunch Shape')
ax.plot(spos*1000,wake,'r',linewidth=2,label='Wake Potential')
ax.grid(True)
ax.set_ylabel(WAKE_YLABELS[pl],fontsize=13)
ax.set_xlim([spos[0]*1000, 8000*sigs])
ax.set_ylim([wake.min()*1.1, wake.max()*1.1])
ax.legend(loc='best')
ax = axs[1]
b = ax.get_position()
b.x0, b.x1 = 0.45, 0.95
ax.set_position(b)
ax.plot(spos*1000,wake,'r',linewidth=2)
ax.grid(True)
tit = ax.set_title(TITLES[pl],fontsize=13)
tit.set_x(0.1)
xl = ax.set_xlabel('s [mm]',fontsize=13)
xl.set_x(0.1)
ax.set_xlim([8000*sigs,spos[-1]*1000])
ax.set_ylim([wake.min()*1.1, wake.max()*1.1])
if save_figs: f.savefig(_jnPth((pth2sv,'W'+pl+'.svg')))
if show: _plt.show()
def plot_impedances(simul_data,save_figs=False,pth2sv=None,show=False,pls=None):
freq = simul_data.freq
if pls is None: pls = PLANES
for pl in pls:
Z = getattr(simul_data,'Z'+pl)
if Z is None or _np.all(Z==0): continue
_plt.figure()
_plt.plot(freq/1e9,Z.real,'r',linewidth=2,label='Re')
_plt.plot(freq/1e9,Z.imag,'b--',linewidth=2,label='Im')
_plt.xlabel('Frequency [GHz]',fontsize=13)
_plt.grid(True)
_plt.title (TITLES[pl],fontsize=13)
_plt.ylabel(IMPS_YLABELS[pl],fontsize=13)
_plt.legend (loc='best')
_plt.xlim(freq[[0,-1]]/1e9)
if save_figs: _plt.savefig(_jnPth((pth2sv,'Z'+pl+'.svg')))
if show: _plt.show()
def plot_losskick_factors(simul_data,save_figs=False,pth2sv=None,show=False,pls=None):
# Extracts and Initialize Needed Variables:
_si.nom_cur = 500e-3
sigvec = _np.array([2.65, 5, 8, 10, 15],dtype=float)*1e-3 # bunch length scenarios
Ivec = _np.linspace(10e-3,_si.nom_cur,num=50) # current scenarios
bunlen = simul_data.bunlen
sigi = _np.linspace(bunlen,18e-3,num=50)
fill_pat = _np.array([1,864,864/2,864/4],dtype=int)
if pls is None: pls = PLANES
pls2 = []
for pl in pls:
W = getattr(simul_data,'W'+pl)
if W is None or _np.all(W==0): continue
Z = getattr(simul_data,'Z'+pl)
if Z is None or _np.all(Z==0): continue
pls2.append(pl)
for pl in pls2:
if pl == 'll':
f,axs = _plt.subplots(nrows=1, ncols=2, figsize=(12,6),gridspec_kw=dict(left=0.08,right=0.97))
ax = axs[0]
fname = 'Loss_factor'
for i in range(fill_pat.shape[0]):
kZi = _np.zeros(sigi.shape[0])
for j in range(sigi.shape[0]):
kZi[j] = simul_data.klossZ(bunlen=sigi[j],n=fill_pat[i]) * 1e-12 #V/pC
ax.semilogy(sigi * 1e3, kZi * 1e3, 'o',markersize=4,label=r'$n = {0:03d}$'.format(fill_pat[i]))
if not i: kZ = kZi[0]
# Calculates klossW
kW = simul_data.klossW() * 1e-12
# Print loss factor calculated in both ways
ax.semilogy(bunlen * 1e3, kW * 1e3, '*',markersize=7,color=[1, 0, 0],label=r'$K_L^W$')
ax.set_title('Loss Factor for $n$ equally spaced bunches.')
ax.set_xlabel(r'$\sigma$ [mm]')
ax.set_ylabel(r'$K_L$ [mV/pC]')
ax.legend(loc='best')
ax.grid(True)
ax.annotate(r'$K_L^W = {0:5.2f}$ mV/pC'.format(kW*1e3),xy=(bunlen*1.1e3, kW*1e3),fontsize=12)
ax = axs[1]
kZvec = _np.zeros(sigvec.shape[0])
labels = []
for i in range(sigvec.shape[0]):
kZvec[i] = simul_data.klossZ(bunlen=sigvec[i], n=_si.harm_num) #V/C
labels.append(r'$\sigma = {0:05.2f}$ mm'.format(sigvec[i]*1e3))
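# Average power deposited for n equally spaced bunches (broadcast over the
# current and bunch-length scenarios): P = k_loss(sigma) * I_avg**2 * T0 / n.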
Plossvec = kZvec[None,:] * Ivec[:,None]**2 * _si.T0/_si.harm_num
ax.semilogy(Ivec*1e3, Plossvec,markersize=4)
ax.set_title('Power Loss for ${0:d}$ equally spaced bunches.'.format(_si.harm_num))
ax.set_xlabel(r'$I_{{avg}}$ [mA]')
ax.set_ylabel(r'Power [W]')
ax.legend(labels,loc='best')
ax.grid(True)
else:
f = _plt.figure(figsize=(6,6))
ax = _plt.axes()
fname = 'Kck'+pl+'_factor'
for i in range(fill_pat.shape[0]):
kZi = _np.zeros(sigi.shape[0])
for j in range(sigi.shape[0]):
kZi[j] = simul_data.kick_factorZ(pl=pl,bunlen=sigi[j],n=fill_pat[i]) * 1e-12 #V/pC/m
ax.plot(sigi*1e3, kZi, 'o',markersize=4,label=r'n = {0:03d}'.format(fill_pat[i]))
if not i: kickZ = kZi[0]
# Calculates kickW:
kickW = simul_data.kick_factorW(pl=pl) * 1e-12
# Plot the kick factor calculated in both ways
ax.plot(bunlen*1e3, kickW, '*',markersize=7,color=[1, 0, 0],label=r'$K_{{{0:s}_{1:s}}}^W$'.format(pl[0].upper(),pl[1]))
ax.set_title('Kick Factor for $n$ equally spaced bunches.')
ax.set_xlabel(r'$\sigma$ [mm]',fontsize=13)
ax.set_ylabel(r'$K_{{{0:s}_{1:s}}}$ [V/pC/m]'.format(pl[0].upper(),pl[1]),fontsize=13)
ax.legend(loc='best')
ax.grid(True)
stri = r'$K_{{{0:s}_{1:s}}}^W = {2:5.2f}$ V/pC/m'.format(pl[0].upper(),pl[1],kickW)
ax.annotate(stri,xy=(bunlen*1.1e3, kickW),fontsize=13)
if save_figs: _plt.savefig(_jnPth((pth2sv,fname+'.svg')))
if show: _plt.show()
def show_now():
_plt.show()
def save_processed_data(simul_data,silent=False,pth2sv=None):
if not silent: print('#'*60 + '\nSaving Processed data:')
spos = simul_data.s
freq = simul_data.freq
if pth2sv is None:
if not silent: print('Saving in the same folder as the raw data')
pth2sv = _os.path.abspath('.')
elif type(pth2sv) is str:
if not silent: print('Saving to subfolder: ' + pth2sv)
if not _os.path.isdir(pth2sv):
if not silent: print('Folder does not exist. Creating it...')
_os.mkdir(pth2sv)
else:
if not silent: print('pth2sv must be a string or None object')
raise Exception('pth2sv must be a string or None')
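# Output layout (as written below): wake potentials and impedances go to
# plain-text gzipped tables, and the full EMSimulData object is pickled
# (gzip-compressed) under DEFAULT_FNAME_SAVE.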
#Save wakes
for par in PLANES:
unit = 'V/C' if par == 'll' else 'V/C/m'
header = '{0:30s} {1:30s}'.format('s [m]', 'W{0:s} [{1:s}]'.format(par,unit))
fname = _jnPth([pth2sv,'W'+par+'.gz'])
wake = getattr(simul_data,'W'+par)
if wake is None or _np.all(wake == 0): continue
if not silent: print('Saving W'+ par + ' data to .gz file')
_np.savetxt(fname,_np.array([spos,wake]).transpose(),
fmt=['%30.16g','%30.16g'], header=header)
#Save Impedances
for par in PLANES:
unit = 'Ohm' if par == 'll' else 'Ohm/m'
header = '{0:30s} {1:30s} {2:30s}'.format('Frequency [GHz]',
'ReZ{0:s} [{1:s}]'.format(par,unit),
'ImZ{0:s} [{1:s}]'.format(par,unit))
fname = _jnPth([pth2sv,'Z'+par+'.gz'])
Z = getattr(simul_data,'Z'+par)
if Z is None or _np.all(Z == 0): continue
if not silent: print('Saving Z'+ par + ' data to .gz file')
_np.savetxt(fname,_np.array([freq/1e9,Z.real,Z.imag]).transpose(),
fmt=['%30.16g','%30.16g','%30.16g'], header=header)
if not silent: print('Saving the Complete EMSimulData structure to a .pickle file.')
with _gzip.open(_jnPth((pth2sv,DEFAULT_FNAME_SAVE)), 'wb') as f:
_pickle.dump(simul_data,f,_pickle.HIGHEST_PROTOCOL)
if not silent: print('All Data Saved\n' + '#'*60)
def load_processed_data(filename):
with _gzip.open(filename,'rb') as fh:
simul_data = _pickle.load(fh)
return simul_data
def create_make_fig_file(path = None):
if path is None: path = _os.path.abspath('.')
fname = _jnPth([path,'create_figs.py'])
analysis = '#!/usr/bin/env python3\n\n'
analysis += 'import os\n'
analysis += 'import pycolleff.process_wakes as ems\n\n'
analysis += 'opts = dict(save_figs=False,show=False)\n'
analysis += 'path = os.path.abspath(__file__).rpartition(os.path.sep)[0]\n'
analysis += "file_name = os.path.sep.join([path,'{0:s}'])\n".format(DEFAULT_FNAME_SAVE)
analysis += 'simul_data = ems.load_processed_data(file_name)\n'
analysis += 'ems.plot_wakes(simul_data,**opts)\n'
analysis += 'ems.plot_impedances(simul_data,**opts)\n'
analysis += 'ems.plot_losskick_factors(simul_data,**opts)\n'
analysis += 'ems.show_now()\n'
with open(fname,'w') as f:
f.writelines(analysis)
_sh.chmod('+x',fname)
| mit |
jbkalmbach/kbmod | analysis/fake_search.py | 2 | 12449 | import os
import shutil
import pandas as pd
import numpy as np
import time
import multiprocessing as mp
import astropy.coordinates as astroCoords
import astropy.units as u
import csv
import trajectoryFiltering as tf
from kbmodpy import kbmod as kb
from astropy.io import fits
from astropy.wcs import WCS
from sklearn.cluster import DBSCAN
from skimage import measure
from analysis_utils import analysis_utils, \
return_indices, stamp_filter_parallel
from collections import OrderedDict
class run_search(analysis_utils):
def __init__(self, v_list, ang_list, num_obs):
"""
Input
--------
v_list : list
[min_velocity, max_velocity, velocity_steps]
ang_list: list
[radians below ecliptic,
radians above ecliptic,
steps]
num_obs : integer
Minimum number of images in which a trajectory must fall on unmasked pixels.
"""
self.v_arr = np.array(v_list)
self.ang_arr = np.array(ang_list)
self.num_obs = num_obs
return
def run_search(self, im_filepath, res_filepath, out_suffix, time_file,
likelihood_level=10., mjd_lims=None, num_fakes=25,
rand_seed=42):
visit_nums, visit_times = np.genfromtxt(time_file, unpack=True)
image_time_dict = OrderedDict()
for visit_num, visit_time in zip(visit_nums, visit_times):
image_time_dict[str(int(visit_num))] = visit_time
chunk_size = 100000
start = time.time()
patch_visits = sorted(os.listdir(im_filepath))
patch_visit_ids = np.array([int(visit_name[1:7]) for visit_name in patch_visits])
patch_visit_times = np.array([image_time_dict[str(visit_id)] for visit_id in patch_visit_ids])
if mjd_lims is None:
use_images = patch_visit_ids
else:
visit_only = np.where(((patch_visit_times > mjd_lims[0])
& (patch_visit_times < mjd_lims[1])))[0]
print(visit_only)
use_images = patch_visit_ids[visit_only]
image_mjd = np.array([image_time_dict[str(visit_id)] for visit_id in use_images])
times = image_mjd - image_mjd[0]
flags = ~0 # mask pixels with any flags
flag_exceptions = [32,39] # unless it has one of these special combinations of flags
master_flags = int('100111', 2) # mask any pixels which have any of
# these flags in more than two images
hdulist = fits.open('%s/v%i-fg.fits' % (im_filepath, use_images[0]))
f0 = hdulist[0].header['FLUXMAG0']
w = WCS(hdulist[1].header)
ec_angle = self.calc_ecliptic_angle(w)
del(hdulist)
images = [kb.layered_image('%s/v%i-fg.fits' % (im_filepath, f)) for f in np.sort(use_images)]
print('Images Loaded')
p = kb.psf(1.4)
# Add fakes steps
print('Adding fake objects')
x_fake_range = (5, 3650)
y_fake_range = (5, 3650)
angle_range = (ec_angle-(np.pi/15.), ec_angle+(np.pi/15.))
velocity_range = (100, 500)
mag_range = (20, 26)
fake_results = []
fake_output = []
np.random.seed(rand_seed)
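# Each fake object gets a uniform starting pixel, a trajectory angle within
# +-pi/15 of the ecliptic, and uniform speed and magnitude; the magnitude is
# converted to flux with the image zeropoint: flux = FLUXMAG0 * 10**(-0.4*mag).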
for val in range(num_fakes):
traj = kb.trajectory()
traj.x = int(np.random.uniform(*x_fake_range))
traj.y = int(np.random.uniform(*y_fake_range))
ang = np.random.uniform(*angle_range)
vel = np.random.uniform(*velocity_range)
traj.x_v = vel*np.cos(ang)
traj.y_v = vel*np.sin(ang)
mag_val = np.random.uniform(*mag_range)
traj.flux = f0*np.power(10, -0.4*mag_val)
fake_results.append(traj)
fake_output.append([traj.x, traj.y, traj.x_v, traj.y_v, traj.flux, mag_val])
for fake_obj in fake_results:
tf.add_trajectory(images, fake_obj, p, times)
stack = kb.image_stack(images)
del(images)
stack.apply_mask_flags(flags, flag_exceptions)
stack.apply_master_mask(master_flags, 2)
stack.grow_mask()
stack.grow_mask()
# stack.apply_mask_threshold(120.)
stack.set_times(times)
print("Times set")
x_size = stack.get_width()
y_size = stack.get_height()
search = kb.stack_search(stack, p)
del(stack)
ang_min = ec_angle - self.ang_arr[0]
ang_max = ec_angle + self.ang_arr[1]
vel_min = self.v_arr[0]
vel_max = self.v_arr[1]
print("Starting Search")
print('---------------------------------------')
param_headers = ("Ecliptic Angle", "Min. Search Angle", "Max Search Angle",
"Min Velocity", "Max Velocity")
param_values = (ec_angle, ang_min, ang_max, vel_min, vel_max)
for header, val in zip(param_headers, param_values):
print('%s = %.4f' % (header, val))
search.gpu(int(self.ang_arr[2]),int(self.v_arr[2]),ang_min,ang_max,
vel_min,vel_max,int(self.num_obs))
keep_stamps = []
keep_snr = []
keep_new_lh = []
keep_results = []
keep_times = []
memory_error = False
keep_lc = []
filter_stats = np.zeros(4)
likelihood_limit = False
res_num = 0
chunk_size = 500000
print('---------------------------------------')
print("Processing Results")
print('---------------------------------------')
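# Results are pulled from the GPU search in chunks: for each candidate the
# psi/phi lightcurves are recomputed, outlier epochs are dropped
# (return_indices), and the loop stops once the raw likelihood in a chunk
# falls below likelihood_level.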
while likelihood_limit is False:
pool = mp.Pool(processes=16)
results = search.get_results(res_num,chunk_size)
chunk_headers = ("Chunk Start", "Chunk Size", "Chunk Max Likelihood",
"Chunk Min. Likelihood")
chunk_values = (res_num, len(keep_results), results[0].lh, results[-1].lh)
for header, val, in zip(chunk_headers, chunk_values):
if isinstance(val, (int, np.integer)):
print('%s = %i' % (header, val))
else:
print('%s = %.2f' % (header, val))
print('---------------------------------------')
psi_curves = []
phi_curves = []
for line in results:
psi_curve, phi_curve = search.lightcurve(line)
psi_curves.append(np.array(psi_curve).flatten())
phi_curve = np.array(phi_curve).flatten()
phi_curve[phi_curve == 0.] = 99999999.
phi_curves.append(phi_curve)
if line.lh < likelihood_level:
likelihood_limit = True
break
keep_idx_results = pool.starmap_async(return_indices,
zip(psi_curves, phi_curves,
[j for j in range(len(psi_curves))]))
pool.close()
pool.join()
keep_idx_results = keep_idx_results.get()
filter_stats[0] += len(psi_curves)
if len(keep_idx_results[0]) < 3:
keep_idx_results = [(0, [-1], 0.)]
for result_on in range(len(psi_curves)):
if keep_idx_results[result_on][1][0] == -1:
continue
elif len(keep_idx_results[result_on][1]) < 3:
continue
elif keep_idx_results[result_on][2] < likelihood_level:
continue
else:
keep_idx = keep_idx_results[result_on][1]
new_likelihood = keep_idx_results[result_on][2]
keep_results.append(results[result_on])
keep_new_lh.append(new_likelihood)
stamps = search.sci_stamps(results[result_on], 10)
stamp_arr = np.array([np.array(stamps[s_idx]) for s_idx in keep_idx])
keep_stamps.append(np.sum(stamp_arr, axis=0))
keep_lc.append((psi_curves[result_on]/phi_curves[result_on])[keep_idx])
keep_snr.append((psi_curves[result_on]/np.sqrt(phi_curves[result_on]))[keep_idx])
#keep_times.append(image_mjd[keep_idx])
keep_times.append(keep_idx)
# if len(keep_results) > 800000:
# with open('%s/memory_error_tr_%s.txt' %
# (res_filepath, out_suffix), 'w') as f:
# f.write('In %i total results, %i were kept. Needs manual look.' %
# (res_num + chunk_size, len(keep_results)))
# memory_error = True
# likelihood_limit = True
# if res_num+chunk_size >= 8000000:
# likelihood_level = 20.
# with open('%s/overload_error_tr_%s.txt' %
# (res_filepath, out_suffix), 'w') as f:
# f.write('In %i total results, %i were kept. Likelihood level down to %f.' %
# (res_num + chunk_size, len(keep_results), line.lh))
res_num += chunk_size
del(search)
lh_sorted_idx = np.argsort(np.array(keep_new_lh))[::-1]
filter_stats[1] = len(lh_sorted_idx)
if len(lh_sorted_idx) > 0:
print("Stamp filtering %i results" % len(lh_sorted_idx))
pool = mp.Pool(processes=16)
stamp_filt_pool = pool.map_async(stamp_filter_parallel,
np.array(keep_stamps)[lh_sorted_idx])
pool.close()
pool.join()
stamp_filt_results = stamp_filt_pool.get()
stamp_filt_idx = lh_sorted_idx[np.where(np.array(stamp_filt_results) == 1)]
filter_stats[2] = len(stamp_filt_idx)
if len(stamp_filt_idx) > 0:
print("Clustering %i results" % len(stamp_filt_idx))
cluster_idx = self.cluster_results(np.array(keep_results)[stamp_filt_idx],
x_size, y_size, [vel_min, vel_max],
[ang_min, ang_max])
final_results = stamp_filt_idx[cluster_idx]
else:
cluster_idx = []
final_results = []
del(cluster_idx)
del(stamp_filt_results)
del(stamp_filt_idx)
del(stamp_filt_pool)
else:
final_results = lh_sorted_idx
print('Keeping %i results' % len(final_results))
filter_stats[3] = len(final_results)
np.savetxt('%s/results_%s.txt' % (res_filepath, out_suffix),
np.array(keep_results)[final_results], fmt='%s')
np.savetxt('%s/results_fakes_%s.txt' % (res_filepath, out_suffix),
np.array(fake_output), header='x,y,xv,yv,flux,mag')
# np.savetxt('%s/lc_%s.txt' % (res_filepath, out_suffix),
# np.array(keep_lc)[final_results], fmt='%s')
with open('%s/lc_%s.txt' % (res_filepath, out_suffix), 'w') as f:
writer = csv.writer(f)
writer.writerows(np.array(keep_lc)[final_results])
with open('%s/snr_%s.txt' % (res_filepath, out_suffix), 'w') as f:
writer = csv.writer(f)
writer.writerows(np.array(keep_snr)[final_results])
# np.savetxt('%s/times_%s.txt' % (res_filepath, out_suffix),
# np.array(keep_times)[final_results], fmt='%s')
with open('%s/times_%s.txt' % (res_filepath, out_suffix), 'w') as f:
writer = csv.writer(f)
writer.writerows(np.array(keep_times)[final_results])
np.savetxt('%s/filtered_likes_%s.txt' % (res_filepath, out_suffix),
np.array(keep_new_lh)[final_results], fmt='%.4f')
np.savetxt('%s/ps_%s.txt' % (res_filepath, out_suffix),
np.array(keep_stamps).reshape(len(keep_stamps), 441)[final_results], fmt='%.4f')
np.savetxt('%s/filt_stats_%s.txt' % (res_filepath, out_suffix), np.array(filter_stats), fmt='%i')
end = time.time()
del(keep_stamps)
del(keep_times)
del(keep_results)
del(keep_new_lh)
del(keep_lc)
print("Time taken for patch: ", end-start)
| bsd-2-clause |
vorwerkc/pymatgen | pymatgen/analysis/magnetism/heisenberg.py | 4 | 37351 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a simple algorithm for extracting nearest neighbor
exchange parameters by mapping low energy magnetic orderings to a Heisenberg
model.
"""
import copy
import logging
import sys
from ast import literal_eval
import numpy as np
import pandas as pd
from monty.json import MSONable, jsanitize
from monty.serialization import dumpfn
from pymatgen.core.structure import Structure
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import MinimumDistanceNN
from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
__author__ = "ncfrey"
__version__ = "0.1"
__maintainer__ = "Nathan C. Frey"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "June 2019"
class HeisenbergMapper:
"""
Class to compute exchange parameters from low energy magnetic orderings.
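Example (illustrative only; ordered_structures and energies are assumed to
come from a prior set of magnetic ordering calculations, and the cutoff
value is just a placeholder):
hm = HeisenbergMapper(ordered_structures, energies, cutoff=3.0)
ex_params = hm.get_exchange()  # exchange parameters (meV/atom)
j_avg = hm.estimate_exchange()  # <J> from the lowest FM/AFM orderings
t_mft = hm.get_mft_temperature(j_avg)  # mean-field critical temperature (K)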
"""
def __init__(self, ordered_structures, energies, cutoff=0.0, tol=0.02):
"""
Exchange parameters are computed by mapping to a classical Heisenberg
model. Strategy is the scheme for generating neighbors. Currently only
MinimumDistanceNN is implemented.
n+1 unique orderings are required to compute n exchange
parameters.
First run a MagneticOrderingsWF to obtain low energy collinear magnetic
orderings and find the magnetic ground state. Then enumerate magnetic
states with the ground state as the input structure, find the subset
of supercells that map to the ground state, and do static calculations
for these orderings.
Args:
ordered_structures (list): Structure objects with magmoms.
energies (list): Total energies of each relaxed magnetic structure.
cutoff (float): Cutoff in Angstrom for nearest neighbor search.
Defaults to 0 (only NN, no NNN, etc.)
tol (float): Tolerance (in Angstrom) on nearest neighbor distances
being equal.
Parameters:
strategy (object): Class from pymatgen.analysis.local_env for
constructing graphs.
sgraphs (list): StructureGraph objects.
unique_site_ids (dict): Maps each site to its unique numerical
identifier.
wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
position.
nn_interactions (dict): {i: j} pairs of NN interactions
between unique sites.
dists (dict): NN, NNN, and NNNN interaction distances
ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
graph.
ex_params (dict): Exchange parameter values (meV/atom)
"""
# Save original copies of inputs
self.ordered_structures_ = ordered_structures
self.energies_ = energies
# Sanitize inputs and optionally order them by energy / magnetic moments
hs = HeisenbergScreener(ordered_structures, energies, screen=False)
ordered_structures = hs.screened_structures
energies = hs.screened_energies
self.ordered_structures = ordered_structures
self.energies = energies
self.cutoff = cutoff
self.tol = tol
# Get graph representations
self.sgraphs = self._get_graphs(cutoff, ordered_structures)
# Get unique site ids and wyckoff symbols
self.unique_site_ids, self.wyckoff_ids = self._get_unique_sites(ordered_structures[0])
# These attributes are set by internal methods
self.nn_interactions = None
self.dists = None
self.ex_mat = None
self.ex_params = None
# Check how many commensurate graphs we found
if len(self.sgraphs) < 2:
print("We need at least 2 unique orderings.")
sys.exit(1)
else: # Set attributes
self._get_nn_dict()
self._get_exchange_df()
@staticmethod
def _get_graphs(cutoff, ordered_structures):
"""
Generate graph representations of magnetic structures with nearest
neighbor bonds. Right now this only works for MinimumDistanceNN.
Args:
cutoff (float): Cutoff in Angstrom for nearest neighbor search.
ordered_structures (list): Structure objects.
Returns:
sgraphs (list): StructureGraph objects.
"""
# Strategy for finding neighbors
if cutoff:
strategy = MinimumDistanceNN(cutoff=cutoff, get_all_sites=True)
else:
strategy = MinimumDistanceNN() # only NN
# Generate structure graphs
sgraphs = [StructureGraph.with_local_env_strategy(s, strategy=strategy) for s in ordered_structures]
return sgraphs
@staticmethod
def _get_unique_sites(structure):
"""
Get dict that maps site indices to unique identifiers.
Args:
structure (Structure): ground state Structure object.
Returns:
unique_site_ids (dict): maps tuples of equivalent site indices to a
unique int identifier
wyckoff_ids (dict): maps tuples of equivalent site indices to their
wyckoff symbols
"""
# Get a nonmagnetic representation of the supercell geometry
s0 = CollinearMagneticStructureAnalyzer(
structure, make_primitive=False, threshold=0.0
).get_nonmagnetic_structure(make_primitive=False)
# Get unique sites and wyckoff positions
if "wyckoff" in s0.site_properties:
s0.remove_site_property("wyckoff")
symm_s0 = SpacegroupAnalyzer(s0).get_symmetrized_structure()
wyckoff = ["n/a"] * len(symm_s0)
equivalent_indices = symm_s0.equivalent_indices
wyckoff_symbols = symm_s0.wyckoff_symbols
# Construct dictionaries that map sites to numerical and wyckoff
# identifiers
unique_site_ids = {}
wyckoff_ids = {}
i = 0
for indices, symbol in zip(equivalent_indices, wyckoff_symbols):
unique_site_ids[tuple(indices)] = i
wyckoff_ids[i] = symbol
i += 1
for index in indices:
wyckoff[index] = symbol
return unique_site_ids, wyckoff_ids
def _get_nn_dict(self):
"""Get dict of unique nearest neighbor interactions.
Returns:
None: (sets self.nn_interactions and self.dists instance variables)
"""
tol = self.tol # tolerance on NN distances
sgraph = self.sgraphs[0]
unique_site_ids = self.unique_site_ids
nn_dict = {}
nnn_dict = {}
nnnn_dict = {}
all_dists = []
# Loop over unique sites and get neighbor distances up to NNNN
for k in unique_site_ids:
i = k[0]
i_key = unique_site_ids[k]
connected_sites = sgraph.get_connected_sites(i)
dists = [round(cs[-1], 2) for cs in connected_sites] # i<->j distances
dists = sorted(list(set(dists))) # NN, NNN, NNNN, etc.
dists = dists[:3] # keep up to NNNN
all_dists += dists
# Keep only up to NNNN and call dists equal if they are within tol
all_dists = sorted(list(set(all_dists)))
rm_list = []
for idx, d in enumerate(all_dists[:-1]):
if abs(d - all_dists[idx + 1]) < tol:
rm_list.append(idx + 1)
all_dists = [d for idx, d in enumerate(all_dists) if idx not in rm_list]
if len(all_dists) < 3: # pad with zeros
all_dists += [0.0] * (3 - len(all_dists))
all_dists = all_dists[:3]
labels = ["nn", "nnn", "nnnn"]
dists = dict(zip(labels, all_dists))
# Get dictionary keys for interactions
for k in unique_site_ids:
i = k[0]
i_key = unique_site_ids[k]
connected_sites = sgraph.get_connected_sites(i)
# Loop over sites and determine unique NN, NNN, etc. interactions
for cs in connected_sites:
dist = round(cs[-1], 2) # i_j distance
j = cs[2] # j index
for key in unique_site_ids.keys():
if j in key:
j_key = unique_site_ids[key]
if abs(dist - dists["nn"]) <= tol:
nn_dict[i_key] = j_key
elif abs(dist - dists["nnn"]) <= tol:
nnn_dict[i_key] = j_key
elif abs(dist - dists["nnnn"]) <= tol:
nnnn_dict[i_key] = j_key
nn_interactions = {"nn": nn_dict, "nnn": nnn_dict, "nnnn": nnnn_dict}
self.dists = dists
self.nn_interactions = nn_interactions
def _get_exchange_df(self):
"""
Loop over all sites in a graph and count the number and types of
nearest neighbor interactions, computing +-|S_i . S_j| to construct
a Heisenberg Hamiltonian for each graph.
Returns:
None: (sets self.ex_mat instance variable)
TODO:
* Deal with large variance in |S| across configs
"""
sgraphs = self.sgraphs
tol = self.tol
unique_site_ids = self.unique_site_ids
nn_interactions = self.nn_interactions
dists = self.dists
# Get |site magmoms| from the FM ordering so that S_i and S_j are consistent?
# Large variations in |S| across orderings can throw the fit off
# fm_struct = self.get_low_energy_orderings()[0]
# Total energy and nonmagnetic energy contribution
columns = ["E", "E0"]
# Get labels of unique NN interactions
for k0, v0 in nn_interactions.items():
for i, j in v0.items(): # i and j indices
c = str(i) + "-" + str(j) + "-" + str(k0)
c_rev = str(j) + "-" + str(i) + "-" + str(k0)
if c not in columns and c_rev not in columns:
columns.append(c)
num_sgraphs = len(sgraphs)
# Keep n interactions (not counting 'E') for n+1 structure graphs
columns = columns[: num_sgraphs + 1]
num_nn_j = len(columns) - 1 # ignore total energy
j_columns = [name for name in columns if name not in ["E", "E0"]]
ex_mat_empty = pd.DataFrame(columns=columns)
ex_mat = ex_mat_empty.copy()
if len(j_columns) < 2:
self.ex_mat = ex_mat # Only <J> can be calculated here
else:
sgraphs_copy = copy.deepcopy(sgraphs)
sgraph_index = 0
# Loop over all sites in each graph and compute |S_i . S_j|
# for n+1 unique graphs to compute n exchange params
for graph in sgraphs:
sgraph = sgraphs_copy.pop(0)
ex_row = pd.DataFrame(np.zeros((1, num_nn_j + 1)), index=[sgraph_index], columns=columns)
for i, node in enumerate(sgraph.graph.nodes):
# s_i_sign = np.sign(sgraph.structure.site_properties['magmom'][i])
s_i = sgraph.structure.site_properties["magmom"][i]
for k in unique_site_ids.keys():
if i in k:
i_index = unique_site_ids[k]
# Get all connections for ith site and compute |S_i . S_j|
connections = sgraph.get_connected_sites(i)
# dists = [round(cs[-1], 2) for cs in connections] # i<->j distances
# dists = sorted(list(set(dists))) # NN, NNN, NNNN, etc.
for j, connection in enumerate(connections):
j_site = connection[2]
dist = round(connection[-1], 2) # i_j distance
# s_j_sign = np.sign(sgraph.structure.site_properties['magmom'][j_site])
s_j = sgraph.structure.site_properties["magmom"][j_site]
for k in unique_site_ids.keys():
if j_site in k:
j_index = unique_site_ids[k]
# Determine order of connection
if abs(dist - dists["nn"]) <= tol:
order = "-nn"
elif abs(dist - dists["nnn"]) <= tol:
order = "-nnn"
elif abs(dist - dists["nnnn"]) <= tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in ex_mat.columns:
ex_row.at[sgraph_index, j_ij] -= s_i * s_j
elif j_ji in ex_mat.columns:
ex_row.at[sgraph_index, j_ji] -= s_i * s_j
# Ignore the row if it is a duplicate to avoid singular matrix
if ex_mat.append(ex_row)[j_columns].equals(
ex_mat.append(ex_row)[j_columns].drop_duplicates(keep="first")
):
e_index = self.ordered_structures.index(sgraph.structure)
ex_row.at[sgraph_index, "E"] = self.energies[e_index]
sgraph_index += 1
ex_mat = ex_mat.append(ex_row)
# if sgraph_index == num_nn_j: # check for zero columns
# zeros = [b for b in (ex_mat[j_columns] == 0).all(axis=0)]
# if True in zeros:
# sgraph_index -= 1 # keep looking
ex_mat[j_columns] = ex_mat[j_columns].div(2.0) # 1/2 factor in Heisenberg Hamiltonian
ex_mat[["E0"]] = 1 # Nonmagnetic contribution
# Check for singularities and delete columns with all zeros
zeros = list((ex_mat == 0).all(axis=0))
if True in zeros:
c = ex_mat.columns[zeros.index(True)]
ex_mat = ex_mat.drop(columns=[c], axis=1)
# ex_mat = ex_mat.drop(ex_mat.tail(len_zeros).index)
# Force ex_mat to be square
ex_mat = ex_mat[: ex_mat.shape[1] - 1]
self.ex_mat = ex_mat
def get_exchange(self):
"""
Take Heisenberg Hamiltonian and corresponding energy for each row and
solve for the exchange parameters.
Returns:
ex_params (dict): Exchange parameter values (meV/atom).
"""
ex_mat = self.ex_mat
# Solve the matrix equation for J_ij values
E = ex_mat[["E"]]
j_names = [j for j in ex_mat.columns if j not in ["E"]]
# Only 1 NN interaction
if len(j_names) < 3:
# Estimate exchange by J ~ E_AFM - E_FM
j_avg = self.estimate_exchange()
ex_params = {"<J>": j_avg}
self.ex_params = ex_params
return ex_params
# Solve the linear system for more than 1 NN interaction
H = ex_mat.loc[:, ex_mat.columns != "E"].values
H_inv = np.linalg.inv(H)
j_ij = np.dot(H_inv, E)
# Convert J_ij to meV
j_ij[1:] *= 1000 # J_ij in meV
j_ij = j_ij.tolist()
ex_params = {j_name: j[0] for j_name, j in zip(j_names, j_ij)}
self.ex_params = ex_params
return ex_params
def get_low_energy_orderings(self):
"""
Find lowest energy FM and AFM orderings to compute E_AFM - E_FM.
Returns:
fm_struct (Structure): fm structure with 'magmom' site property
afm_struct (Structure): afm structure with 'magmom' site property
fm_e (float): fm energy
afm_e (float): afm energy
"""
fm_struct, afm_struct = None, None
mag_min = np.inf
mag_max = 0.001
fm_e_min = 0
afm_e_min = 0
# epas = [e / len(s) for (e, s) in zip(self.energies, self.ordered_structures)]
for s, e in zip(self.ordered_structures, self.energies):
ordering = CollinearMagneticStructureAnalyzer(s, threshold=0.0, make_primitive=False).ordering
magmoms = s.site_properties["magmom"]
# Try to find matching orderings first
if ordering == Ordering.FM and e < fm_e_min:
fm_struct = s
mag_max = abs(sum(magmoms))
fm_e = e
fm_e_min = e
if ordering == Ordering.AFM and e < afm_e_min:
afm_struct = s
afm_e = e
mag_min = abs(sum(magmoms))
afm_e_min = e
# Brute force search for closest thing to FM and AFM
if not fm_struct or not afm_struct:
for s, e in zip(self.ordered_structures, self.energies):
magmoms = s.site_properties["magmom"]
if abs(sum(magmoms)) > mag_max: # FM ground state
fm_struct = s
fm_e = e
mag_max = abs(sum(magmoms))
# AFM ground state
if abs(sum(magmoms)) < mag_min:
afm_struct = s
afm_e = e
mag_min = abs(sum(magmoms))
afm_e_min = e
elif abs(sum(magmoms)) == 0 and mag_min == 0:
if e < afm_e_min:
afm_struct = s
afm_e = e
afm_e_min = e
# Convert to magnetic structures with 'magmom' site property
fm_struct = CollinearMagneticStructureAnalyzer(
fm_struct, make_primitive=False, threshold=0.0
).get_structure_with_only_magnetic_atoms(make_primitive=False)
afm_struct = CollinearMagneticStructureAnalyzer(
afm_struct, make_primitive=False, threshold=0.0
).get_structure_with_only_magnetic_atoms(make_primitive=False)
return fm_struct, afm_struct, fm_e, afm_e
def estimate_exchange(self, fm_struct=None, afm_struct=None, fm_e=None, afm_e=None):
"""
Estimate <J> for a structure based on low energy FM and AFM orderings.
Args:
fm_struct (Structure): fm structure with 'magmom' site property
afm_struct (Structure): afm structure with 'magmom' site property
fm_e (float): fm energy/atom
afm_e (float): afm energy/atom
Returns:
j_avg (float): Average exchange parameter (meV/atom)
"""
# Get low energy orderings if not supplied
if any(arg is None for arg in [fm_struct, afm_struct, fm_e, afm_e]):
fm_struct, afm_struct, fm_e, afm_e = self.get_low_energy_orderings()
magmoms = fm_struct.site_properties["magmom"]
# Normalize energies by number of magnetic ions
# fm_e = fm_e / len(magmoms)
# afm_e = afm_e / len(afm_magmoms)
m_avg = np.mean([np.sqrt(m ** 2) for m in magmoms])
# If m_avg for FM config is < 1 we won't get sensible results.
if m_avg < 1:
iamthedanger = """
Local magnetic moments are small (< 1 muB / atom). The
exchange parameters may be wrong, but <J> and the mean
field critical temperature estimate may be OK.
"""
logging.warning(iamthedanger)
delta_e = afm_e - fm_e # J > 0 -> FM
j_avg = delta_e / (m_avg ** 2) # eV / magnetic ion
j_avg *= 1000 # meV / ion
return j_avg
def get_mft_temperature(self, j_avg):
"""
Crude mean field estimate of critical temperature based on <J> for
one sublattice, or solving the coupled equations for a multisublattice
material.
Args:
j_avg (float): j_avg (float): Average exchange parameter (meV/atom)
Returns:
mft_t (float): Critical temperature (K)
"""
num_sublattices = len(self.unique_site_ids)
k_boltzmann = 0.0861733 # meV/K
# Only 1 magnetic sublattice
if num_sublattices == 1:
mft_t = 2 * abs(j_avg) / 3 / k_boltzmann
else: # multiple magnetic sublattices
omega = np.zeros((num_sublattices, num_sublattices))
ex_params = self.ex_params
ex_params = {k: v for (k, v) in ex_params.items() if k != "E0"} # ignore E0
for k in ex_params:
# split into i, j unique site identifiers
sites = k.split("-")
sites = [int(num) for num in sites[:2]] # cut 'nn' identifier
i, j = sites[0], sites[1]
omega[i, j] += ex_params[k]
omega[j, i] += ex_params[k]
omega = omega * 2 / 3 / k_boltzmann
eigenvals, eigenvecs = np.linalg.eig(omega)
mft_t = max(eigenvals)
if mft_t > 1500: # Not sensible!
stayoutofmyterritory = """
This mean field estimate is too high! Probably
the true low energy orderings were not given as inputs.
"""
logging.warning(stayoutofmyterritory)
return mft_t
def get_interaction_graph(self, filename=None):
"""
Get a StructureGraph with edges and weights that correspond to exchange
interactions and J_ij values, respectively.
Args:
filename (str): if not None, save interaction graph to filename.
Returns:
igraph (StructureGraph): Exchange interaction graph.
"""
structure = self.ordered_structures[0]
sgraph = self.sgraphs[0]
igraph = StructureGraph.with_empty_graph(
structure, edge_weight_name="exchange_constant", edge_weight_units="meV"
)
if "<J>" in self.ex_params: # Only <J> is available
warning_msg = """
Only <J> is available. The interaction graph will not tell
you much.
"""
logging.warning(warning_msg)
# J_ij exchange interaction matrix
for i, node in enumerate(sgraph.graph.nodes):
connections = sgraph.get_connected_sites(i)
for c in connections:
jimage = c[1] # relative integer coordinates of atom j
j = c[2] # index of neighbor
dist = c[-1] # i <-> j distance
j_exc = self._get_j_exc(i, j, dist)
igraph.add_edge(i, j, to_jimage=jimage, weight=j_exc, warn_duplicates=False)
# Save to a json file if desired
if filename:
if filename.endswith(".json"):
dumpfn(igraph, filename)
else:
filename += ".json"
dumpfn(igraph, filename)
return igraph
def _get_j_exc(self, i, j, dist):
"""
Convenience method for looking up exchange parameter between two sites.
Args:
i (int): index of ith site
j (int): index of jth site
dist (float): distance (Angstrom) between sites
(10E-2 precision)
Returns:
j_exc (float): Exchange parameter in meV
"""
# Get unique site identifiers
for k in self.unique_site_ids.keys():
if i in k:
i_index = self.unique_site_ids[k]
if j in k:
j_index = self.unique_site_ids[k]
order = ""
# Determine order of interaction
if abs(dist - self.dists["nn"]) <= self.tol:
order = "-nn"
elif abs(dist - self.dists["nnn"]) <= self.tol:
order = "-nnn"
elif abs(dist - self.dists["nnnn"]) <= self.tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in self.ex_params:
j_exc = self.ex_params[j_ij]
elif j_ji in self.ex_params:
j_exc = self.ex_params[j_ji]
else:
j_exc = 0
# Check if only averaged NN <J> values are available
if "<J>" in self.ex_params and order == "-nn":
j_exc = self.ex_params["<J>"]
return j_exc
def get_heisenberg_model(self):
"""Save results of mapping to a HeisenbergModel object.
Returns:
hmodel (HeisenbergModel): MSONable object.
"""
# Original formula unit with nonmagnetic ions
hm_formula = str(self.ordered_structures_[0].composition.reduced_formula)
hm_structures = self.ordered_structures
hm_energies = self.energies
hm_cutoff = self.cutoff
hm_tol = self.tol
hm_sgraphs = self.sgraphs
hm_usi = self.unique_site_ids
hm_wids = self.wyckoff_ids
hm_nni = self.nn_interactions
hm_d = self.dists
# Exchange matrix DataFrame in json format
hm_em = self.ex_mat.to_json()
hm_ep = self.get_exchange()
hm_javg = self.estimate_exchange()
hm_igraph = self.get_interaction_graph()
hmodel = HeisenbergModel(
hm_formula,
hm_structures,
hm_energies,
hm_cutoff,
hm_tol,
hm_sgraphs,
hm_usi,
hm_wids,
hm_nni,
hm_d,
hm_em,
hm_ep,
hm_javg,
hm_igraph,
)
return hmodel
class HeisenbergScreener:
"""
Class to clean and screen magnetic orderings.
"""
def __init__(self, structures, energies, screen=False):
"""
This class pre-processes magnetic orderings and energies for
HeisenbergMapper. It prioritizes low-energy orderings with large and
localized magnetic moments.
Args:
structures (list): Structure objects with magnetic moments.
energies (list): Energies/atom of magnetic orderings.
screen (bool): Try to screen out high energy and low-spin configurations.
Attributes:
screened_structures (list): Sorted structures.
screened_energies (list): Sorted energies.
"""
# Cleanup
structures, energies = self._do_cleanup(structures, energies)
n_structures = len(structures)
# If there are more than 2 structures, we want to perform a
# screening to prioritize well-behaved orderings
if screen and n_structures > 2:
structures, energies = self._do_screen(structures, energies)
self.screened_structures = structures
self.screened_energies = energies
@staticmethod
def _do_cleanup(structures, energies):
"""Sanitize input structures and energies.
Takes magnetic structures and performs the following operations
- Erases nonmagnetic ions and gives all ions ['magmom'] site prop
- Converts total energies -> energy / magnetic ion
- Checks for duplicate/degenerate orderings
- Sorts by energy
Args:
structures (list): Structure objects with magmoms.
energies (list): Corresponding energies.
Returns:
ordered_structures (list): Sanitized structures.
ordered_energies (list): Sorted energies.
"""
# Get only magnetic ions & give all structures site_properties['magmom']
# zero threshold so that magnetic ions with small moments
# are preserved
ordered_structures = [
CollinearMagneticStructureAnalyzer(
s, make_primitive=False, threshold=0.0
).get_structure_with_only_magnetic_atoms(make_primitive=False)
for s in structures
]
# Convert to energies / magnetic ion
energies = [e / len(s) for (e, s) in zip(energies, ordered_structures)]
# Check for duplicate / degenerate states (sometimes different initial
# configs relax to the same state)
remove_list = []
for i, e in enumerate(energies):
e_tol = 6 # 10^-6 eV/atom tol on energies
e = round(e, e_tol)
if i not in remove_list:
for i_check, e_check in enumerate(energies):
e_check = round(e_check, e_tol)
if i != i_check and i_check not in remove_list and e == e_check:
remove_list.append(i_check)
# Also discard structures with small |magmoms| < 0.1 uB
# xx - get rid of these or just bury them in the list?
# for i, s in enumerate(ordered_structures):
# magmoms = s.site_properties['magmom']
# if i not in remove_list:
# if any(abs(m) < 0.1 for m in magmoms):
# remove_list.append(i)
# Remove duplicates
if len(remove_list):
ordered_structures = [s for i, s in enumerate(ordered_structures) if i not in remove_list]
energies = [e for i, e in enumerate(energies) if i not in remove_list]
# Sort by energy if not already sorted
ordered_structures = [s for _, s in sorted(zip(energies, ordered_structures), reverse=False)]
ordered_energies = sorted(energies, reverse=False)
return ordered_structures, ordered_energies
@staticmethod
def _do_screen(structures, energies):
"""Screen and sort magnetic orderings based on some criteria.
Prioritize low energy orderings and large, localized magmoms. _do_cleanup should be run first to sanitize inputs.
Args:
structures (list): At least three structure objects.
energies (list): Energies.
Returns:
screened_structures (list): Sorted structures.
screened_energies (list): Sorted energies.
"""
magmoms = [s.site_properties["magmom"] for s in structures]
n_below_1ub = [len([m for m in ms if abs(m) < 1]) for ms in magmoms]
df = pd.DataFrame(
{
"structure": structures,
"energy": energies,
"magmoms": magmoms,
"n_below_1ub": n_below_1ub,
}
)
# keep the ground and first excited state fixed to capture the
# low-energy spectrum
index = list(df.index)[2:]
df_high_energy = df.iloc[2:]
# Prioritize structures with fewer magmoms < 1 uB
df_high_energy = df_high_energy.sort_values(by="n_below_1ub")
index = [0, 1] + list(df_high_energy.index)
# sort
df = df.reindex(index)
screened_structures = list(df["structure"].values)
screened_energies = list(df["energy"].values)
return screened_structures, screened_energies
class HeisenbergModel(MSONable):
"""
Store a Heisenberg model fit to low-energy magnetic orderings.
Intended to be generated by HeisenbergMapper.get_heisenberg_model().
"""
def __init__(
self,
formula=None,
structures=None,
energies=None,
cutoff=None,
tol=None,
sgraphs=None,
unique_site_ids=None,
wyckoff_ids=None,
nn_interactions=None,
dists=None,
ex_mat=None,
ex_params=None,
javg=None,
igraph=None,
):
"""
Args:
formula (str): Reduced formula of compound.
structures (list): Structure objects with magmoms.
energies (list): Energies of each relaxed magnetic structure.
cutoff (float): Cutoff in Angstrom for nearest neighbor search.
tol (float): Tolerance (in Angstrom) on nearest neighbor distances being equal.
sgraphs (list): StructureGraph objects.
unique_site_ids (dict): Maps each site to its unique numerical
identifier.
wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
position.
nn_interactions (dict): {i: j} pairs of NN interactions
between unique sites.
dists (dict): NN, NNN, and NNNN interaction distances
ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
graph.
ex_params (dict): Exchange parameter values (meV/atom).
javg (float): <J> exchange param (meV/atom).
igraph (StructureGraph): Exchange interaction graph.
"""
self.formula = formula
self.structures = structures
self.energies = energies
self.cutoff = cutoff
self.tol = tol
self.sgraphs = sgraphs
self.unique_site_ids = unique_site_ids
self.wyckoff_ids = wyckoff_ids
self.nn_interactions = nn_interactions
self.dists = dists
self.ex_mat = ex_mat
self.ex_params = ex_params
self.javg = javg
self.igraph = igraph
def as_dict(self):
"""
Because some dicts have tuple keys, some sanitization is required for json compatibility.
"""
d = {}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["@version"] = __version__
d["formula"] = self.formula
d["structures"] = [s.as_dict() for s in self.structures]
d["energies"] = self.energies
d["cutoff"] = self.cutoff
d["tol"] = self.tol
d["sgraphs"] = [sgraph.as_dict() for sgraph in self.sgraphs]
d["dists"] = self.dists
d["ex_params"] = self.ex_params
d["javg"] = self.javg
d["igraph"] = self.igraph.as_dict()
# Sanitize tuple & int keys
d["ex_mat"] = jsanitize(self.ex_mat)
d["nn_interactions"] = jsanitize(self.nn_interactions)
d["unique_site_ids"] = jsanitize(self.unique_site_ids)
d["wyckoff_ids"] = jsanitize(self.wyckoff_ids)
return d
@classmethod
def from_dict(cls, d):
"""Create a HeisenbergModel from a dict."""
# Reconstitute the site ids
usids = {}
wids = {}
nnis = {}
for k, v in d["nn_interactions"].items():
nn_dict = {}
for k1, v1 in v.items():
key = literal_eval(k1)
nn_dict[key] = v1
nnis[k] = nn_dict
for k, v in d["unique_site_ids"].items():
key = literal_eval(k)
if isinstance(key, int):
usids[tuple([key])] = v
elif isinstance(key, tuple):
usids[key] = v
for k, v in d["wyckoff_ids"].items():
key = literal_eval(k)
wids[key] = v
# Reconstitute the structure and graph objects
structures = []
sgraphs = []
for v in d["structures"]:
structures.append(Structure.from_dict(v))
for v in d["sgraphs"]:
sgraphs.append(StructureGraph.from_dict(v))
# Interaction graph
igraph = StructureGraph.from_dict(d["igraph"])
# Reconstitute the exchange matrix DataFrame
try:
ex_mat = eval(d["ex_mat"])
ex_mat = pd.DataFrame.from_dict(ex_mat)
except SyntaxError: # if ex_mat is empty
ex_mat = pd.DataFrame(columns=["E", "E0"])
hmodel = HeisenbergModel(
formula=d["formula"],
structures=structures,
energies=d["energies"],
cutoff=d["cutoff"],
tol=d["tol"],
sgraphs=sgraphs,
unique_site_ids=usids,
wyckoff_ids=wids,
nn_interactions=nnis,
dists=d["dists"],
ex_mat=ex_mat,
ex_params=d["ex_params"],
javg=d["javg"],
igraph=igraph,
)
return hmodel
def _get_j_exc(self, i, j, dist):
"""
Convenience method for looking up exchange parameter between two sites.
Args:
i (int): index of ith site
j (int): index of jth site
dist (float): distance (Angstrom) between sites +- tol
Returns:
j_exc (float): Exchange parameter in meV
"""
# Get unique site identifiers
for k in self.unique_site_ids.keys():
if i in k:
i_index = self.unique_site_ids[k]
if j in k:
j_index = self.unique_site_ids[k]
order = ""
# Determine order of interaction
if abs(dist - self.dists["nn"]) <= self.tol:
order = "-nn"
elif abs(dist - self.dists["nnn"]) <= self.tol:
order = "-nnn"
elif abs(dist - self.dists["nnnn"]) <= self.tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in self.ex_params:
j_exc = self.ex_params[j_ij]
elif j_ji in self.ex_params:
j_exc = self.ex_params[j_ji]
else:
j_exc = 0
# Check if only averaged NN <J> values are available
if "<J>" in self.ex_params and order == "-nn":
j_exc = self.ex_params["<J>"]
return j_exc
| mit |
Chuban/moose | python/peacock/tests/peacock_app/CheckRequirements/test_CheckRequirements.py | 5 | 1733 | #!/usr/bin/env python
import unittest
import mock
from peacock import CheckRequirements
try:
import builtins
except ImportError:
import __builtin__ as builtins
realimport = builtins.__import__
bad_import_name = ""
def myimport(name, globals={}, locals={}, fromlist=[], level=-1):
if name == bad_import_name:
raise ImportError
else:
return realimport(name, globals, locals, fromlist, level)
class Tests(unittest.TestCase):
def setUp(self):
builtins.__import__ = realimport
def blockImport(self, name):
global bad_import_name
bad_import_name = name
builtins.__import__ = myimport
def testBadVTK(self):
self.blockImport("vtk")
self.assertFalse(CheckRequirements.check_vtk())
@mock.patch('peacock.CheckRequirements.ErrorObserver')
def testBadOpenGL(self, mock_err):
self.assertFalse(CheckRequirements.check_vtk())
def testGoodVTK(self):
self.assertTrue(CheckRequirements.check_vtk())
def testGoodQt(self):
self.assertTrue(CheckRequirements.check_qt())
def testBadQt(self):
self.blockImport("PyQt5")
self.assertFalse(CheckRequirements.check_qt())
def testGoodMatplotlib(self):
self.assertTrue(CheckRequirements.check_matplotlib())
def testBadMatplotlib(self):
self.blockImport("matplotlib")
self.assertFalse(CheckRequirements.check_matplotlib())
def testBadRequirements(self):
self.blockImport("matplotlib")
self.assertFalse(CheckRequirements.has_requirements())
def testGoodRequirements(self):
self.assertTrue(CheckRequirements.has_requirements())
if __name__ == '__main__':
unittest.main(verbosity=2)
| lgpl-2.1 |
axiom-data-science/pyaxiom | pyaxiom/netcdf/sensors/profile.py | 1 | 7828 | #!python
# coding=utf-8
import os
import random
import bisect
import calendar
from datetime import datetime
import netCDF4
import numpy as np
import pandas as pd
from pyaxiom import logger
class Profile(object):
def __init__(self, df=None, global_attributes=None, variable_attributes=None, fill_value=None, vertical_positive=None, base_time=None):
self.df = df if isinstance(df, pd.DataFrame) and not df.empty else pd.DataFrame()
self.fill_value = fill_value or -9999.9
self.global_attributes = global_attributes or {}
self.variable_attributes = variable_attributes or {}
self.vertical_positive = vertical_positive or 'down'
self.base_time = base_time or 'seconds since 1970-01-01 00:00:00'
@property
def variable_attributes(self):
defaults = {
'time' : {
'units' : self.base_time,
'standard_name' : 'time',
'long_name': 'time'
},
'latitude' : {
'units' : 'degrees_north',
'standard_name' : 'latitude',
'long_name' : 'latitude',
'axis': 'Y'
},
'longitude' : {
'units' : 'degrees_east',
'standard_name' : 'longitude',
'long_name' : 'longitude',
'axis': 'X'
},
'z' : {
'units' : 'm',
'standard_name' : 'depth',
'long_name' : 'depth',
'positive': self.vertical_positive,
'axis': 'Z'
},
'profile' : {
'cf_role' : 'profile_id'
},
'crs' : {
'long_name' : 'http://www.opengis.net/def/crs/EPSG/0/4326',
'grid_mapping_name' : 'latitude_longitude',
'epsg_code' : 'EPSG:4326',
'semi_major_axis' : float(6378137.0),
'inverse_flattening' : float(298.257223563)
},
'platform' : {
'definition' : "http://mmisw.org/ont/ioos/definition/stationID"
}
}
defaults.update(self._variable_attributes)
return defaults
@variable_attributes.setter
def variable_attributes(self, vas):
self._variable_attributes = vas
@property
def global_attributes(self):
gas = self._global_attributes
gas.update({
'geospatial_vertical_positive': self.vertical_positive,
'date_created': datetime.utcnow().strftime("%Y-%m-%dT%H:%M:00Z"),
'Conventions': 'CF-1.6',
'Metadata_conventions': 'Unidata Dataset Discovery v1.0',
'featureType': 'profile',
'cdm_data_type': 'Profile'
})
if not self.df.empty:
# Time
starting = self.df['time'].min()
ending = self.df['time'].max()
duration = "P%sS" % str(int(round((ending - starting).total_seconds())))
gas.update({
'time_coverage_start': starting.strftime("%Y-%m-%dT%H:%M:00Z"),
'time_coverage_end': ending.strftime("%Y-%m-%dT%H:%M:00Z"),
'time_coverage_duration': duration,
})
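# time_coverage_resolution is reported as the most common spacing between
# unique time stamps (the mode of the pairwise differences computed below).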
diffs = self.df['time'].unique()[1:] - self.df['time'].unique()[:-1]
uniqs, inverse = np.unique(diffs, return_inverse=True)
if uniqs.size > 1:
time_diffs = diffs[np.bincount(inverse).argmax()]
gas.update({
'time_coverage_resolution': "P%sS" % str(round(time_diffs.astype('timedelta64[s]').astype(int)))
})
# Vertical
gas.update({
'geospatial_vertical_min': self.df['z'].min(),
'geospatial_vertical_max': self.df['z'].max(),
})
# Horizontal
gas.update({
'geospatial_lat_min': self.df['latitude'].min(),
'geospatial_lat_max': self.df['latitude'].max(),
'geospatial_lon_min': self.df['longitude'].min(),
'geospatial_lon_max': self.df['longitude'].max(),
})
return gas
@global_attributes.setter
def global_attributes(self, gas):
# These are set by this class; we don't want anyone to be able to set them manually
global_skips = ["time_coverage_start", "time_coverage_end", "time_coverage_duration", "time_coverage_resolution",
"featureType", "geospatial_vertical_positive", "geospatial_vertical_min", "geospatial_vertical_max",
"geospatial_lat_min", "geospatial_lon_min", "geospatial_lat_max", "geospatial_lon_max",
"Conventions", "date_created", "cdm_data_type"]
for i in set(global_skips) & gas.keys():
logger.warning("Ignoring global attribute {} because it is calculated or set automatically".format(i))
self._global_attributes = { k: v for k, v in gas.items() if k not in global_skips }
def export(self, output_file, file_type=None):
# Make output directory
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
class IncompleteProfile(Profile):
def export(self, output_file):
super(IncompleteProfile, self).export(output_file)
with netCDF4.Dataset(output_file, 'w', clobber=True) as nc:
gas = self.global_attributes
nc.setncatts(gas)
profiles = self.df.profile.unique().size
profile_group = self.df.groupby('profile')
max_z = profile_group.size().max()
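# Incomplete multidimensional profile layout (an assumption based on the
# shapes used here): one row per profile, padded with the fill value out to
# the deepest profile (max_z levels).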
nc.createDimension('profile', profiles)
nc.createDimension('z', max_z)
profile = nc.createVariable('profile', self.df.profile.dtype, ('profile',))
_, unique_profile_rows = np.unique(self.df.profile.values, return_index=True)
profile[:] = list(range(profiles))
time = nc.createVariable('time', int, ('profile',))
time[:] = netCDF4.date2num([datetime.utcfromtimestamp(t) for t in self.df.time.unique().astype('<M8[s]').astype(int)], units=self.base_time)
latitude = nc.createVariable('latitude', self.df.latitude.dtype, ('profile',))
latitude[:] = self.df.latitude.values[unique_profile_rows]
longitude = nc.createVariable('longitude', self.df.longitude.dtype, ('profile',))
longitude[:] = self.df.longitude.values[unique_profile_rows]
# Metadata variables
nc.createVariable("crs", 'i4')
nc.createVariable("platform", "i4")
nc.setncattr('platform', 'platform')
# Data vars
reserved_columns = ['profile', 'time', 'latitude', 'longitude']
for i, (name, p) in enumerate(profile_group):
for c in [d for d in self.df.columns if d not in reserved_columns]:
var_name = c.split(' ')[0].lower()
fill = p[c].dtype.type(self.fill_value)
if var_name not in nc.variables:
v = nc.createVariable(var_name, self.df[c].dtype, ('profile', 'z'), fill_value=fill)
else:
v = nc.variables[var_name]
assignable_values = p[c].fillna(fill).values
v[i, :len(assignable_values)] = assignable_values
for k, v in self.variable_attributes.items():
if k in nc.variables:
for n, z in v.items():
try:
nc.variables[k].setncattr(n, z)
except BaseException:
logger.warning('Could not set attribute {} on {}'.format(n, k))
| mit |
numenta/nupic.fluent | tests/unit/utils/network_data_generator_test.py | 4 | 7356 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for the NetworkDataGenerator class."""
import os
import pandas
import random
import unittest
from fluent.utils.network_data_generator import NetworkDataGenerator
from nupic.data.file_record_stream import FileRecordStream
try:
import simplejson as json
except ImportError:
import json
class NetworkDataGeneratorTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(NetworkDataGeneratorTest, self).__init__(*args, **kwargs)
self.expected = [[
{"_token": "get",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 1},
{"_token": "rid",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0},
{"_token": "of",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0},
{"_token": "the",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0},
{"_token": "trrible",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0},
{"_token": "kitchen",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0},
{"_token": "odor",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0}],
[{"_token": "i",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 1},
{"_token": "don",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 0},
{"_token": "t",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 0},
{"_token": "care",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 0}]]
self.dirName = os.path.dirname(os.path.realpath(__file__))
def assertRecordsEqual(self, actual, expected):
self.assertIsInstance(actual, list)
self.assertEqual(len(actual), len(expected))
for a, e in zip(actual, expected):
self.assertEqual(len(a), len(e))
for ra, re in zip(a, e):
self.assertDictEqual(ra, re)
def testSplitNoPreprocess(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
ndg.split(filename, 3, False)
self.assertRecordsEqual(ndg.records, self.expected)
def testSplitPreprocess(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
expected = [[
{"_token": "gohbkchoo",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 1}],
[{"_token": "o",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 1},
{"_token": "ca",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 0}]]
ndg.split(filename, 3, True, ignoreCommon=100, correctSpell=True)
self.assertRecordsEqual(ndg.records, expected)
def testRandomize(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
ndg.split(filename, 3, False)
random.seed(1)
ndg.randomizeData()
dataOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_split.csv")
categoriesOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_categories.json")
success = ndg.saveData(dataOutputFile, categoriesOutputFile)
randomizedIDs = []
dataTable = pandas.read_csv(dataOutputFile)
for _, values in dataTable.iterrows():
record = values.to_dict()
idx = record["_sequenceID"]
if idx.isdigit() and (not randomizedIDs or randomizedIDs[-1] != idx):
randomizedIDs.append(idx)
self.assertNotEqual(randomizedIDs, range(len(randomizedIDs)))
def testSaveData(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
ndg.split(filename, 3, False)
dataOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_split.csv")
categoriesOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_categories.json")
success = ndg.saveData(dataOutputFile, categoriesOutputFile)
self.assertTrue(success)
dataTable = pandas.read_csv(dataOutputFile).fillna("")
types = {"_categories": "list",
"_token": "string",
"_sequenceID": "int",
"_reset": "int",
"ID": "string"}
specials = {"_categories": "C",
"_token": "",
"_sequenceID": "S",
"_reset": "R",
"ID": ""}
expected_records = [record for data in self.expected for record in data]
expected_records.insert(0, specials)
expected_records.insert(0, types)
for idx, values in dataTable.iterrows():
record = values.to_dict()
if idx > 1:
# csv values are strings, so cast the ints
record["_sequenceID"] = int(record["_sequenceID"])
record["_reset"] = int(record["_reset"])
self.assertDictEqual(record, expected_records[idx])
with open(categoriesOutputFile) as f:
categories = json.load(f)
expected_categories = {"kitchen": 0, "environment": 1, "not helpful": 2}
self.assertDictEqual(categories, expected_categories)
def testSaveDataIncorrectType(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
dataOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_split.csv")
categoriesOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_categories.csv")
ndg.split(filename, 3, False)
with self.assertRaises(TypeError):
ndg.saveData(dataOutputFile, categoriesOutputFile)
def testFileRecordStreamReadData(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
ndg.split(filename, 3, False)
dataOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_split.csv")
categoriesOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_categories.json")
ndg.saveData(dataOutputFile, categoriesOutputFile)
# If no error is raised, then the data is in the correct format
frs = FileRecordStream(dataOutputFile)
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
binghongcha08/pyQMD | QMC/MC_exchange/permute4d/dissipation/4.0/traj.py | 17 | 1290 | import numpy as np
import pylab as plt
import matplotlib.pyplot as plt
import matplotlib as mpl
#data = np.genfromtxt(fname='/home/bing/dissipation/energy.dat')
data = np.genfromtxt(fname='energy.dat')
fig, (ax1,ax2) = plt.subplots(ncols=1, nrows=2, sharex=True)
#font = {'family' : 'ubuntu',
# 'weight' : 'normal',
# 'size' : '16'}
#mpl.rc('font', **font) # pass in the font dict as kwargs
mpl.rcParams['font.size'] = 12
#mpl.rcParams['figure.figsize'] = 8,6
#pl.title('two-steps fitting alg')
ax1.set_ylabel('Energy [hartree]')
ax1.plot(data[:,0],data[:,2],'b--',linewidth=2,label='Potential')
#pl.plot(dat[:,0],dat[:,2],'r-',linewidth=2)
ax1.plot(data[:,0],data[:,3],'g-.',linewidth=2,label='Quantum Potential')
ax1.plot(data[:,0],data[:,4],'k-',linewidth=2,label='Energy')
#pl.legend(bbox_to_anchor=(0.5, 0.38, 0.42, .302), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#ax1.set_yticks((0.4,0.6,0.8))
ax1.legend(loc=0)
ax1.set_ylim(0,5)
ax2.set_xlabel('time [a.u.]')
ax2.set_ylabel('Energy [hartree]')
ax2.plot(data[:,0],data[:,1],'r--',linewidth=2,label='$Kinetic$')
#pl.plot(dat[:,0],dat[:,1],'k-',linewidth=2)
ax2.set_yscale('log')
#ax2.set_xticks((0,4,8))
#ax2.set_yticks((1e-7,1e-5,1e-3))
plt.legend(loc=0)
plt.subplots_adjust(hspace=0.)
plt.show()
| gpl-3.0 |
dsm054/pandas | pandas/core/internals/blocks.py | 1 | 112726 | # -*- coding: utf-8 -*-
import functools
import warnings
import inspect
import re
from datetime import datetime, timedelta, date
import numpy as np
from pandas._libs import lib, tslib, tslibs, internals as libinternals
from pandas._libs.tslibs import conversion, Timedelta
from pandas import compat
from pandas.compat import range, zip
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.dtypes import (
ExtensionDtype, DatetimeTZDtype,
PandasExtensionDtype,
CategoricalDtype)
from pandas.core.dtypes.common import (
_TD_DTYPE, _NS_DTYPE,
ensure_platform_int,
is_integer,
is_dtype_equal,
is_timedelta64_dtype,
is_datetime64_dtype, is_datetimetz,
is_categorical, is_categorical_dtype,
is_integer_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_object_dtype,
is_float_dtype,
is_numeric_v_string_like, is_extension_type,
is_extension_array_dtype,
is_list_like,
is_re,
is_re_compilable,
is_sparse,
pandas_dtype)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_upcast,
maybe_promote,
infer_dtype_from,
infer_dtype_from_scalar,
soft_convert_objects,
maybe_convert_objects,
astype_nansafe,
find_common_type,
maybe_infer_dtype_type)
from pandas.core.dtypes.missing import (
isna, notna, array_equivalent,
_isna_compat,
is_null_datelike_scalar)
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDatetimeIndex,
ABCExtensionArray,
ABCIndexClass)
import pandas.core.common as com
import pandas.core.algorithms as algos
import pandas.core.missing as missing
from pandas.core.base import PandasObject
from pandas.core.arrays import Categorical
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import check_setitem_lengths
from pandas.io.formats.printing import pprint_thing
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_datetimetz = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
is_extension = False
_box_to_block_values = True
_can_hold_na = False
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_concatenator = staticmethod(np.concatenate)
def __init__(self, values, placement, ndim=None):
self.ndim = self._check_ndim(values, ndim)
self.mgr_locs = placement
self.values = values
if (self._validate_ndim and self.ndim and
len(self.mgr_locs) != len(self.values)):
raise ValueError(
'Wrong number of items passed {val}, placement implies '
'{mgr}'.format(val=len(self.values), mgr=len(self.mgr_locs)))
def _check_ndim(self, values, ndim):
"""ndim inference and validation.
Infers ndim from 'values' if not provided to __init__.
Validates that values.ndim and ndim are consistent if and only if
the class variable '_validate_ndim' is True.
Parameters
----------
values : array-like
ndim : int or None
Returns
-------
ndim : int
Raises
------
ValueError : the number of dimensions do not match
"""
if ndim is None:
ndim = values.ndim
if self._validate_ndim and values.ndim != ndim:
msg = ("Wrong number of dimensions. values.ndim != ndim "
"[{} != {}]")
raise ValueError(msg.format(values.ndim, ndim))
return ndim
@property
def _holder(self):
"""The array-like that can hold the underlying values.
None for 'Block', overridden by subclasses that don't
use an ndarray.
"""
return None
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
        validate that we have a dtype that is astype-able to categorical;
        return a boolean indicating whether we are a categorical
"""
if dtype is Categorical or dtype is CategoricalDtype:
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
elif is_categorical_dtype(dtype):
return True
return False
def external_values(self, dtype=None):
""" return an outside world format, currently just the ndarray """
return self.values
def internal_values(self, dtype=None):
""" return an internal format, currently just the ndarray
this should be the pure internal API format
"""
return self.values
def formatting_values(self):
"""Return the internal values used by the DataFrame/SeriesFormatter"""
return self.internal_values()
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
this is often overridden to handle to_dense like operations
"""
if is_object_dtype(dtype):
return self.values.astype(object)
return self.values
def to_dense(self):
return self.values.view()
@property
def _na_value(self):
return np.nan
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, libinternals.BlockPlacement):
new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return self.dtype
def make_block(self, values, placement=None, ndim=None):
"""
        Create a new block, with type inference, propagating any values
        that are not specified
"""
if placement is None:
placement = self.mgr_locs
if ndim is None:
ndim = self.ndim
return make_block(values, placement=placement, ndim=ndim)
def make_block_scalar(self, values):
"""
Create a ScalarBlock
"""
return ScalarBlock(values)
def make_block_same_class(self, values, placement=None, ndim=None,
dtype=None):
""" Wrap given values in a block of same type as self. """
if dtype is not None:
# issue 19431 fastparquet is passing this
warnings.warn("dtype argument is deprecated, will be removed "
"in a future release.", DeprecationWarning)
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, ndim=ndim,
klass=self.__class__, dtype=dtype)
def __unicode__(self):
# don't want to print out all of the items here
name = pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '{name}: {len} dtype: {dtype}'.format(
name=name, len=len(self), dtype=self.dtype)
else:
shape = ' x '.join(pprint_thing(s) for s in self.shape)
result = '{name}: {index}, {shape}, dtype: {dtype}'.format(
name=name, index=pprint_thing(self.mgr_locs.indexer),
shape=shape, dtype=self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = libinternals.BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(values=self.get_values().T,
placement=self.mgr_locs, shape=shape,
labels=labels, ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
if getattr(self.values, '_pandas_ftype', False):
dtype = self.dtype.subtype
else:
dtype = self.dtype
return "{dtype}:{ftype}".format(dtype=dtype, ftype=self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._concatenator([blk.values for blk in to_concat],
axis=self.ndim - 1)
return self.make_block_same_class(
values, placement=placement or slice(0, len(values), 1))
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not
one
"""
with np.errstate(all='ignore'):
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = self.make_block(values=_block_shape(result,
ndim=self.ndim))
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
mask = isna(self.values)
if limit is not None:
if not is_integer(limit):
raise ValueError('Limit must be an integer')
if limit < 1:
raise ValueError('Limit must be greater than 0')
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim - 1) > limit] = False
# fillna, but if we cannot coerce, then try again as an ObjectBlock
try:
values, _ = self._try_coerce_args(self.values, value)
blocks = self.putmask(mask, value, inplace=inplace)
blocks = [b.make_block(values=self._try_coerce_result(b.values))
for b in blocks]
return self._maybe_downcast(blocks, downcast)
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
# operate column-by-column
def f(m, v, i):
block = self.coerce_to_target_dtype(value)
# slice out our block
if i is not None:
block = block.getitem_block(slice(i, i + 1))
return block.fillna(value,
limit=limit,
inplace=inplace,
downcast=None)
return self.split_and_operate(mask, f, inplace)
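    # Note: blocks that cannot hold NA (_can_hold_na is False, e.g. integer
    # and bool blocks) return early above, since there is nothing to fill.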
def split_and_operate(self, mask, f, inplace):
"""
        split the block per-column, apply the callable f to each column,
        and return a new block for each. Handle masking, which will not
        change a block unless needed.
Parameters
----------
mask : 2-d boolean mask
f : callable accepting (1d-mask, 1d values, indexer)
inplace : boolean
Returns
-------
list of blocks
"""
if mask is None:
mask = np.ones(self.shape, dtype=bool)
new_values = self.values
def make_a_block(nv, ref_loc):
if isinstance(nv, Block):
block = nv
elif isinstance(nv, list):
block = nv[0]
else:
# Put back the dimension that was taken from it and make
# a block out of the result.
try:
nv = _block_shape(nv, ndim=self.ndim)
except (AttributeError, NotImplementedError):
pass
block = self.make_block(values=nv,
placement=ref_loc)
return block
# ndim == 1
if self.ndim == 1:
if mask.any():
nv = f(mask, new_values, None)
else:
nv = new_values if inplace else new_values.copy()
block = make_a_block(nv, self.mgr_locs)
return [block]
# ndim > 1
new_blocks = []
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
nv = f(m, v, i)
else:
nv = v if inplace else v.copy()
block = make_a_block(nv, [ref_loc])
new_blocks.append(block)
return new_blocks
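    # Illustrative sketch of the callable contract used by split_and_operate
    # (hypothetical names, mirroring the docstring above; not a real call
    # site in this module):
    #
    #   def f(m, v, i):
    #       # m: 1-d boolean mask, v: 1-d values, i: column position or None
    #       v = v.copy()
    #       v[m] = 0
    #       return v
    #
    #   new_blocks = block.split_and_operate(mask, f, inplace=False)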
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
if not isinstance(blocks, list):
blocks = [blocks]
return _extend_blocks([b.downcast(downcast) for b in blocks])
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = maybe_downcast_to_dtype(values, dtypes)
return self.make_block(nv)
# ndim > 1
if dtypes is None:
return self
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# operate column-by-column
# this is expensive as it splits the blocks items-by-item
def f(m, v, i):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
if dtype is not None:
v = maybe_downcast_to_dtype(v, dtype)
return v
return self.split_and_operate(None, f, False)
def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
return self._astype(dtype, copy=copy, errors=errors, values=values,
**kwargs)
def _astype(self, dtype, copy=False, errors='raise', values=None,
klass=None, **kwargs):
"""Coerce to the new type
Parameters
----------
dtype : str, dtype convertible
copy : boolean, default False
copy if indicated
        errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
Returns
-------
Block
"""
errors_legal_values = ('raise', 'ignore')
if errors not in errors_legal_values:
invalid_arg = ("Expected value of kwarg 'errors' to be one of {}. "
"Supplied value is '{}'".format(
list(errors_legal_values), errors))
raise ValueError(invalid_arg)
if (inspect.isclass(dtype) and
issubclass(dtype, (PandasExtensionDtype, ExtensionDtype))):
msg = ("Expected an instance of {}, but got the class instead. "
"Try instantiating 'dtype'.".format(dtype.__name__))
raise TypeError(msg)
# may need to convert to categorical
if self.is_categorical_astype(dtype):
# deprecated 17636
if ('categories' in kwargs or 'ordered' in kwargs):
if isinstance(dtype, CategoricalDtype):
raise TypeError(
"Cannot specify a CategoricalDtype and also "
"`categories` or `ordered`. Use "
"`dtype=CategoricalDtype(categories, ordered)`"
" instead.")
warnings.warn("specifying 'categories' or 'ordered' in "
".astype() is deprecated; pass a "
"CategoricalDtype instead",
FutureWarning, stacklevel=7)
categories = kwargs.get('categories', None)
ordered = kwargs.get('ordered', None)
if com._any_not_none(categories, ordered):
dtype = CategoricalDtype(categories, ordered)
if is_categorical_dtype(self.values):
# GH 10696/18593: update an existing categorical efficiently
return self.make_block(self.values.astype(dtype, copy=copy))
return self.make_block(Categorical(self.values, dtype=dtype))
# convert dtypes if needed
dtype = pandas_dtype(dtype)
# astype processing
if is_dtype_equal(self.dtype, dtype):
if copy:
return self.copy()
return self
if klass is None:
if is_sparse(self.values):
# special case sparse, Series[Sparse].astype(object) is sparse
klass = ExtensionBlock
elif is_object_dtype(dtype):
klass = ObjectBlock
elif is_extension_array_dtype(dtype):
klass = ExtensionBlock
try:
# force the copy here
if values is None:
if self.is_extension:
values = self.values.astype(dtype)
else:
if issubclass(dtype.type,
(compat.text_type, compat.string_types)):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.get_values()
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
values = astype_nansafe(values.ravel(), dtype, copy=True)
# TODO(extension)
# should we make this attribute?
try:
values = values.reshape(self.shape)
except AttributeError:
pass
newb = make_block(values, placement=self.mgr_locs,
klass=klass, ndim=self.ndim)
except Exception: # noqa: E722
if errors == 'raise':
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError(
"cannot set astype for copy = [{copy}] for dtype "
"({dtype} [{shape}]) to different shape "
"({newb_dtype} [{newb_shape}])".format(
copy=copy, dtype=self.dtype.name,
shape=self.shape, newb_dtype=newb.dtype.name,
newb_shape=newb.shape))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types return a copy
of the block (if copy = True) by definition we are not an ObjectBlock
here!
"""
return self.copy() if copy else self
def _can_hold_element(self, element):
""" require the same dtype as ourselves """
dtype = self.values.dtype.type
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, dtype)
return isinstance(element, dtype)
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type, we may have
roundtripped thru object in the mean-time
"""
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isna(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return maybe_downcast_to_dtype(result, dtype)
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
if np.any(notna(other)) and not self._can_hold_element(other):
# coercion issues
# let higher levels handle
raise TypeError("cannot convert {} to an {}".format(
type(other).__name__,
type(self).__name__.lower().replace('Block', '')))
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def to_native_types(self, slicer=None, na_rep='nan', quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
values = self.get_values()
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True):
""" copy constructor """
values = self.values
if deep:
values = values.copy()
return self.make_block_same_class(values)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True):
"""replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API compatibility.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
original_to_replace = to_replace
# try to replace, if we raise an error, convert to ObjectBlock and
# retry
try:
values, to_replace = self._try_coerce_args(self.values,
to_replace)
mask = missing.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
blocks = self.putmask(mask, value, inplace=inplace)
if convert:
blocks = [b.convert(by_item=True, numeric=False,
copy=not inplace) for b in blocks]
return blocks
except (TypeError, ValueError):
# GH 22083, TypeError or ValueError occurred within error handling
# causes infinite loop. Cast and retry only if not objectblock.
if is_object_dtype(self):
raise
# try again with a compatible block
block = self.astype(object)
return block.replace(to_replace=original_to_replace,
value=value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert)
def _replace_single(self, *args, **kwargs):
""" no-op on a non-ObjectBlock """
return self if kwargs['inplace'] else self.copy()
def setitem(self, indexer, value):
"""Set the value inplace, returning a a maybe different typed block.
Parameters
----------
indexer : tuple, list-like, array-like, slice
The subset of self.values to set
value : object
The value being set
Returns
-------
Block
Notes
-----
`indexer` is a direct slice/positional indexer. `value` must
be a compatible shape.
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce if block dtype can store value
values = self.values
try:
values, value = self._try_coerce_args(values, value)
# can keep its own dtype
if hasattr(value, 'dtype') and is_dtype_equal(values.dtype,
value.dtype):
dtype = self.dtype
else:
dtype = 'infer'
except (TypeError, ValueError):
# current dtype cannot store value, coerce to common dtype
find_dtype = False
if hasattr(value, 'dtype'):
dtype = value.dtype
find_dtype = True
elif lib.is_scalar(value):
if isna(value):
# NaN promotion is handled in latter path
dtype = False
else:
dtype, _ = infer_dtype_from_scalar(value,
pandas_dtype=True)
find_dtype = True
else:
dtype = 'infer'
if find_dtype:
dtype = find_common_type([values.dtype, dtype])
if not is_dtype_equal(self.dtype, dtype):
b = self.astype(dtype)
return b.setitem(indexer, value)
# value must be storeable at this moment
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
# length checking
check_setitem_lengths(indexer, value, values)
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0
for idx in indexer)
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0
for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could
# be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif (len(arr_value.shape) and
arr_value.shape[0] == values.shape[0] and
np.prod(arr_value.shape) == np.prod(values.shape)):
values[indexer] = value
try:
values = values.astype(arr_value.dtype)
except ValueError:
pass
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
values = self._try_coerce_and_cast_result(values, dtype)
block = self.make_block(transf(values))
return block
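    # Rough example of the promotion path above (descriptive, not a doctest):
    # setting a float scalar into an int64 block makes _try_coerce_args
    # raise, find_common_type then picks float64, and setitem is retried on
    # the block returned by self.astype(float64), so the caller gets a
    # float64 block back.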
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a list of new blocks, the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new = getattr(new, 'values', new)
mask = getattr(mask, 'values', mask)
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isna(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
_, new = self._try_coerce_args(new_values, new)
if transpose:
new_values = new_values.T
# If the default repeat behavior in np.putmask would go in the
# wrong direction, then explicitly repeat and reshape new instead
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim and axis == 1:
new = np.repeat(
new, new_values.shape[-1]).reshape(self.shape)
new = new.astype(new_values.dtype)
# we require exact matches between the len of the
# values we are setting (or is compat). np.putmask
# doesn't check this and will simply truncate / pad
# the output, but we want sane error messages
#
# TODO: this prob needs some better checking
# for 2D cases
if ((is_list_like(new) and
np.any(mask[mask]) and
getattr(new, 'ndim', 1) == 1)):
if not (mask.shape[-1] == len(new) or
mask[mask].shape[-1] == len(new) or
len(new) == 1):
raise ValueError("cannot assign mismatch "
"length to masked array")
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
if transpose:
mask = mask.T
if isinstance(new, np.ndarray):
new = new.T
axis = new_values.ndim - axis - 1
# Pseudo-broadcast
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim:
new_shape = list(new.shape)
new_shape.insert(axis, 1)
new = new.reshape(tuple(new_shape))
# operate column-by-column
def f(m, v, i):
if i is None:
# ndim==1 case.
n = new
else:
if isinstance(new, np.ndarray):
n = np.squeeze(new[i % new.shape[0]])
else:
n = np.array(new)
# type of the new block
dtype, _ = maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
return nv
new_blocks = self.split_and_operate(mask, f, inplace)
return new_blocks
if inplace:
return [self]
if transpose:
new_values = new_values.T
return [self.make_block(new_values)]
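    # Illustrative note: putmask may upcast. For an int64 block with
    # new=np.nan, _can_hold_element fails, so the column-by-column path above
    # promotes via maybe_promote/_putmask_smart and typically returns
    # float64 blocks that can hold the NaN.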
def coerce_to_target_dtype(self, other):
"""
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
we can also safely try to coerce to the same dtype
and will receive the same block
"""
# if we cannot then coerce to object
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if is_dtype_equal(self.dtype, dtype):
return self
if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
# we don't upcast to bool
return self.astype(object)
elif ((self.is_float or self.is_complex) and
(is_integer_dtype(dtype) or is_float_dtype(dtype))):
# don't coerce float/complex to int
return self
elif (self.is_datetime or
is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)):
# not a datetime
if not ((is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)) and self.is_datetime):
return self.astype(object)
# don't upcast timezone with different timezone or no timezone
mytz = getattr(self.dtype, 'tz', None)
othertz = getattr(dtype, 'tz', None)
if str(mytz) != str(othertz):
return self.astype(object)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
elif (self.is_timedelta or is_timedelta64_dtype(dtype)):
# not a timedelta
if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
return self.astype(object)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
try:
return self.astype(dtype)
except (ValueError, TypeError):
pass
return self.astype(object)
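    # Examples of the fallbacks above (descriptive, not exhaustive):
    # - an int64 block coerced against a float scalar -> self.astype(float64)
    # - a naive datetime64[ns] block against a tz-aware value (or mismatched
    #   tz) -> self.astype(object)
    # - anything coerced against a bool or object value -> self.astype(object)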
def interpolate(self, method='pad', axis=0, index=None, values=None,
inplace=False, limit=None, limit_direction='forward',
limit_area=None, fill_value=None, coerce=False,
downcast=None, **kwargs):
inplace = validate_bool_kwarg(inplace, 'inplace')
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = missing.clean_fill_method(method)
except ValueError:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m, axis=axis,
inplace=inplace, limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = missing.clean_interp_method(method, **kwargs)
except ValueError:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m, index=index, values=values,
axis=axis, limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value, inplace=inplace,
downcast=downcast, **kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
inplace = validate_bool_kwarg(inplace, 'inplace')
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
values, fill_value = self._try_coerce_args(values, fill_value)
values = missing.interpolate_2d(values, method=method, axis=axis,
limit=limit, fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [self.make_block_same_class(values, ndim=self.ndim)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
limit_direction='forward', limit_area=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
inplace = validate_bool_kwarg(inplace, 'inplace')
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to missing.interpolate_1d
return missing.interpolate_1d(index, x, method=method, limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [self.make_block_same_class(interp_values)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
# algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
# so need to preserve types
# sparse is treated like an ndarray, but needs .get_values() shaping
values = self.values
if self.is_sparse:
values = self.get_values()
if fill_tuple is None:
fill_value = self.fill_value
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=False, fill_value=fill_value)
else:
fill_value = fill_tuple[0]
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = libinternals.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if not is_dtype_equal(new_values.dtype, self.dtype):
return self.make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = algos.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, ensure_platform_int(periods),
axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [self.make_block(new_values)]
def where(self, other, cond, align=True, errors='raise',
try_cast=False, axis=0, transpose=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a new block(s), the result of the func
"""
import pandas.core.computation.expressions as expressions
assert errors in ['raise', 'ignore']
values = self.values
orig_other = other
if transpose:
values = values.T
other = getattr(other, '_values', getattr(other, 'values', other))
cond = getattr(cond, 'values', cond)
# If the default broadcasting would go in the wrong direction, then
# explicitly reshape other instead
if getattr(other, 'ndim', 0) >= 1:
if values.ndim - 1 == other.ndim and axis == 1:
other = other.reshape(tuple(other.shape + (1, )))
elif transpose and values.ndim == self.ndim - 1:
cond = cond.T
if not hasattr(cond, 'shape'):
raise ValueError("where must have a condition that is ndarray "
"like")
# our where function
def func(cond, values, other):
if cond.ravel().all():
return values
values, other = self._try_coerce_args(values, other)
try:
return self._try_coerce_result(expressions.where(
cond, values, other))
except Exception as detail:
if errors == 'raise':
raise TypeError(
'Could not operate [{other!r}] with block values '
'[{detail!s}]'.format(other=other, detail=detail))
else:
# return the values
result = np.empty(values.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
try:
result = func(cond, values, other)
except TypeError:
# we cannot coerce, return a compat dtype
# we are explicitly ignoring errors
block = self.coerce_to_target_dtype(other)
blocks = block.where(orig_other, cond, align=align,
errors=errors,
try_cast=try_cast, axis=axis,
transpose=transpose)
return self._maybe_downcast(blocks, 'infer')
if self._can_hold_na or self.ndim == 1:
if transpose:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return self.make_block(result)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(result.take(m.nonzero()[0],
axis=axis))
result_blocks.append(
self.make_block(r.T, placement=self.mgr_locs[m]))
return result_blocks
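    # Usage note (descriptive): `cond` must be ndarray-like with a shape, and
    # when the block cannot hold `other` the TypeError path above coerces via
    # coerce_to_target_dtype and retries, so callers may receive block(s) of
    # a different (possibly object) dtype.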
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape:
return False
return array_equivalent(self.values, other.values)
def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
"""Return a list of unstacked blocks of self
Parameters
----------
unstacker_func : callable
Partially applied unstacker.
new_columns : Index
All columns of the unstacked BlockManager.
n_rows : int
Only used in ExtensionBlock.unstack
fill_value : int
Only used in ExtensionBlock.unstack
Returns
-------
blocks : list of Block
New blocks of unstacked values.
mask : array_like of bool
The mask of columns of `blocks` we should keep.
"""
unstacker = unstacker_func(self.values.T)
new_items = unstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = unstacker.get_new_values()
mask = mask.any(0)
new_values = new_values.T[mask]
new_placement = new_placement[mask]
blocks = [make_block(new_values, placement=new_placement)]
return blocks, mask
def quantile(self, qs, interpolation='linear', axis=0, axes=None):
"""
        compute the quantiles of the block's values
Parameters
----------
qs: a scalar or list of the quantiles to be computed
interpolation: type of interpolation, default 'linear'
axis: axis to compute, default 0
axes : BlockManager.axes
Returns
-------
tuple of (axis, block)
"""
kw = {'interpolation': interpolation}
values = self.get_values()
values, _ = self._try_coerce_args(values, values)
def _nanpercentile1D(values, mask, q, **kw):
# mask is Union[ExtensionArray, ndarray]
# we convert to an ndarray for NumPy 1.9 compat, which didn't
# treat boolean-like arrays as boolean. This conversion would have
# been done inside ndarray.__getitem__ anyway, since values is
# an ndarray at this point.
mask = np.asarray(mask)
values = values[~mask]
if len(values) == 0:
if lib.is_scalar(q):
return self._na_value
else:
return np.array([self._na_value] * len(q),
dtype=values.dtype)
return np.percentile(values, q, **kw)
def _nanpercentile(values, q, axis, **kw):
mask = isna(self.values)
if not lib.is_scalar(mask) and mask.any():
if self.ndim == 1:
return _nanpercentile1D(values, mask, q, **kw)
else:
# for nonconsolidatable blocks mask is 1D, but values 2D
if mask.ndim < values.ndim:
mask = mask.reshape(values.shape)
if axis == 0:
values = values.T
mask = mask.T
result = [_nanpercentile1D(val, m, q, **kw) for (val, m)
in zip(list(values), list(mask))]
result = np.array(result, dtype=values.dtype, copy=False).T
return result
else:
return np.percentile(values, q, axis=axis, **kw)
from pandas import Float64Index
is_empty = values.shape[axis] == 0
if is_list_like(qs):
ax = Float64Index(qs)
if is_empty:
if self.ndim == 1:
result = self._na_value
else:
# create the array of na_values
# 2d len(values) * len(qs)
result = np.repeat(np.array([self._na_value] * len(qs)),
len(values)).reshape(len(values),
len(qs))
else:
try:
result = _nanpercentile(values, np.array(qs) * 100,
axis=axis, **kw)
except ValueError:
# older numpies don't handle an array for q
result = [_nanpercentile(values, q * 100,
axis=axis, **kw) for q in qs]
result = np.array(result, copy=False)
if self.ndim > 1:
result = result.T
else:
if self.ndim == 1:
ax = Float64Index([qs])
else:
ax = axes[0]
if is_empty:
if self.ndim == 1:
result = self._na_value
else:
result = np.array([self._na_value] * len(self))
else:
result = _nanpercentile(values, qs * 100, axis=axis, **kw)
ndim = getattr(result, 'ndim', None) or 0
result = self._try_coerce_result(result)
if lib.is_scalar(result):
return ax, self.make_block_scalar(result)
return ax, make_block(result,
placement=np.arange(len(result)),
ndim=ndim)
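    # Shape sketch (descriptive): for list-like qs the returned axis is a
    # Float64Index of the requested quantiles, e.g.
    #
    #   ax, blk = block.quantile([0.25, 0.75])
    #   # ax -> Float64Index([0.25, 0.75]); blk holds the two percentiles
    #
    # while a scalar qs on a 1-d block yields a scalar wrapped in a
    # ScalarBlock.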
def _replace_coerce(self, to_replace, value, inplace=True, regex=False,
convert=False, mask=None):
"""
Replace value corresponding to the given boolean array with another
value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default False
Perform inplace modification.
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default True
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
True indicate corresponding element is ignored.
Returns
-------
A new block if there is anything to replace or the original block.
"""
if mask.any():
if not regex:
self = self.coerce_to_target_dtype(value)
return self.putmask(mask, value, inplace=inplace)
else:
return self._replace_single(to_replace, value, inplace=inplace,
regex=regex,
convert=convert,
mask=mask)
return self
class ScalarBlock(Block):
"""
a scalar compat Block
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
def __init__(self, values):
self.ndim = 0
self.mgr_locs = [0]
self.values = values
@property
def dtype(self):
return type(self.values)
@property
def shape(self):
return tuple([0])
def __len__(self):
return 0
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
def __init__(self, values, placement, ndim=None):
"""Initialize a non-consolidatable block.
'ndim' may be inferred from 'placement'.
        This will continue to call __init__ for the other base
        classes mixed in with this Mixin.
"""
# Placement must be converted to BlockPlacement so that we can check
# its length
if not isinstance(placement, libinternals.BlockPlacement):
placement = libinternals.BlockPlacement(placement)
# Maybe infer ndim from placement
if ndim is None:
if len(placement) != 1:
ndim = 1
else:
ndim = 2
super(NonConsolidatableMixIn, self).__init__(values, placement,
ndim=ndim)
@property
def shape(self):
if self.ndim == 1:
return (len(self.values)),
return (len(self.mgr_locs), len(self.values))
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if not com.is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False):
"""
putmask the data to the block; we must be a single block and not
generate other blocks
return the resulting block
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block, the result of the putmask
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# use block's copy logic.
# .values may be an Index which does shallow copy by default
new_values = self.values if inplace else self.copy().values
new_values, new = self._try_coerce_args(new_values, new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
new = new[mask]
mask = _safe_reshape(mask, new_values.shape)
new_values[mask] = new
new_values = self._try_coerce_result(new_values)
return [self.make_block(values=new_values)]
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
"""Return a list of unstacked blocks of self
Parameters
----------
unstacker_func : callable
Partially applied unstacker.
new_columns : Index
All columns of the unstacked BlockManager.
n_rows : int
Only used in ExtensionBlock.unstack
fill_value : int
Only used in ExtensionBlock.unstack
Returns
-------
blocks : list of Block
New blocks of unstacked values.
mask : array_like of bool
The mask of columns of `blocks` we should keep.
"""
# NonConsolidatable blocks can have a single item only, so we return
# one block per item
unstacker = unstacker_func(self.values.T)
new_placement, new_values, mask = self._get_unstack_items(
unstacker, new_columns
)
new_values = new_values.T[mask]
new_placement = new_placement[mask]
blocks = [self.make_block_same_class(vals, [place])
for vals, place in zip(new_values, new_placement)]
return blocks, mask
def _get_unstack_items(self, unstacker, new_columns):
"""
Get the placement, values, and mask for a Block unstack.
This is shared between ObjectBlock and ExtensionBlock. They
differ in that ObjectBlock passes the values, while ExtensionBlock
passes the dummy ndarray of positions to be used by a take
later.
Parameters
----------
unstacker : pandas.core.reshape.reshape._Unstacker
new_columns : Index
All columns of the unstacked BlockManager.
Returns
-------
new_placement : ndarray[int]
The placement of the new columns in `new_columns`.
new_values : Union[ndarray, ExtensionArray]
The first return value from _Unstacker.get_new_values.
mask : ndarray[bool]
The second return value from _Unstacker.get_new_values.
"""
# shared with ExtensionBlock
new_items = unstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = unstacker.get_new_values()
mask = mask.any(0)
return new_placement, new_values, mask
class ExtensionBlock(NonConsolidatableMixIn, Block):
"""Block for holding extension types.
Notes
-----
This holds all 3rd-party extension array types. It's also the immediate
parent class for our internal extension types' blocks, CategoricalBlock.
ExtensionArrays are limited to 1-D.
"""
is_extension = True
def __init__(self, values, placement, ndim=None):
values = self._maybe_coerce_values(values)
super(ExtensionBlock, self).__init__(values, placement, ndim)
def _maybe_coerce_values(self, values):
"""Unbox to an extension array.
This will unbox an ExtensionArray stored in an Index or Series.
ExtensionArrays pass through. No dtype coercion is done.
Parameters
----------
values : Index, Series, ExtensionArray
Returns
-------
ExtensionArray
"""
if isinstance(values, (ABCIndexClass, ABCSeries)):
values = values._values
return values
@property
def _holder(self):
# For extension blocks, the holder is values-dependent.
return type(self.values)
@property
def fill_value(self):
# Used in reindex_indexer
return self.values.dtype.na_value
@property
def _can_hold_na(self):
# The default ExtensionArray._can_hold_na is True
return self._holder._can_hold_na
@property
def is_view(self):
"""Extension arrays are never treated as views."""
return False
@property
def is_numeric(self):
return self.values.dtype._is_numeric
def setitem(self, indexer, value):
"""Set the value inplace, returning a same-typed block.
This differs from Block.setitem by not allowing setitem to change
the dtype of the Block.
Parameters
----------
indexer : tuple, list-like, array-like, slice
The subset of self.values to set
value : object
The value being set
Returns
-------
Block
Notes
-----
`indexer` is a direct slice/positional indexer. `value` must
be a compatible shape.
"""
if isinstance(indexer, tuple):
# we are always 1-D
indexer = indexer[0]
check_setitem_lengths(indexer, value, self.values)
self.values[indexer] = value
return self
def get_values(self, dtype=None):
# ExtensionArrays must be iterable, so this works.
values = np.asarray(self.values)
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def to_dense(self):
return np.asarray(self.values)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
        # if it's REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take(indexer, fill_value=fill_value,
allow_fill=True)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def _can_hold_element(self, element):
# XXX: We may need to think about pushing this onto the array.
# We're doing the same as CategoricalBlock here.
return True
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
return self.values[slicer]
def formatting_values(self):
return self.values._formatting_values()
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._holder._concat_same_type(
[blk.values for blk in to_concat])
placement = placement or slice(0, len(values), 1)
return self.make_block_same_class(values, ndim=self.ndim,
placement=placement)
def fillna(self, value, limit=None, inplace=False, downcast=None):
values = self.values if inplace else self.values.copy()
values = values.fillna(value=value, limit=limit)
return [self.make_block_same_class(values=values,
placement=self.mgr_locs,
ndim=self.ndim)]
def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(
values=values.fillna(value=fill_value, method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0):
"""
Shift the block by `periods`.
Dispatches to underlying ExtensionArray and re-boxes in an
ExtensionBlock.
"""
# type: (int, Optional[BlockPlacement]) -> List[ExtensionBlock]
return [self.make_block_same_class(self.values.shift(periods=periods),
placement=self.mgr_locs,
ndim=self.ndim)]
@property
def _ftype(self):
return getattr(self.values, '_pandas_ftype', Block._ftype)
def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
# ExtensionArray-safe unstack.
# We override ObjectBlock._unstack, which unstacks directly on the
# values of the array. For EA-backed blocks, this would require
# converting to a 2-D ndarray of objects.
# Instead, we unstack an ndarray of integer positions, followed by
# a `take` on the actual values.
dummy_arr = np.arange(n_rows)
dummy_unstacker = functools.partial(unstacker_func, fill_value=-1)
unstacker = dummy_unstacker(dummy_arr)
new_placement, new_values, mask = self._get_unstack_items(
unstacker, new_columns
)
blocks = [
self.make_block_same_class(
self.values.take(indices, allow_fill=True,
fill_value=fill_value),
[place])
for indices, place in zip(new_values.T, new_placement)
]
return blocks, mask
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return (issubclass(tipo.type, (np.floating, np.integer)) and
not issubclass(tipo.type, (np.datetime64, np.timedelta64)))
return (
isinstance(
element, (float, int, np.floating, np.int_, compat.long))
and not isinstance(element, (bool, np.bool_, datetime, timedelta,
np.datetime64, np.timedelta64)))
def to_native_types(self, slicer=None, na_rep='', float_format=None,
decimal='.', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
# see gh-13418: no special formatting is desired at the
# output (important for appropriate 'quoting' behaviour),
# so do not pass it through the FloatArrayFormatter
if float_format is None and decimal == '.':
mask = isna(values)
if not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type,
(np.floating, np.integer, np.complexfloating))
return (
isinstance(
element,
(float, int, complex, np.float_, np.int_, compat.long))
and not isinstance(element, (bool, np.bool_)))
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return (issubclass(tipo.type, np.integer) and
not issubclass(tipo.type, (np.datetime64,
np.timedelta64)) and
self.dtype.itemsize >= tipo.itemsize)
return is_integer(element)
def should_store(self, value):
return is_integer_dtype(value) and value.dtype == self.dtype
class DatetimeLikeBlockMixin(object):
"""Mixin class for DatetimeBlock and DatetimeTZBlock."""
@property
def _holder(self):
return DatetimeIndex
@property
def _na_value(self):
return tslibs.NaT
@property
def fill_value(self):
return tslibs.iNaT
def get_values(self, dtype=None):
"""
return object dtype as boxed values, such as Timestamps/Timedelta
"""
if is_object_dtype(dtype):
return lib.map_infer(self.values.ravel(),
self._box_func).reshape(self.values.shape)
return self.values
class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
def __init__(self, values, placement, ndim=None):
if values.dtype != _TD_DTYPE:
values = conversion.ensure_timedelta64ns(values)
super(TimeDeltaBlock, self).__init__(values,
placement=placement, ndim=ndim)
@property
def _holder(self):
return TimedeltaIndex
@property
def _box_func(self):
return lambda x: Timedelta(x, unit='ns')
def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, (np.timedelta64, np.int64))
return is_integer(element) or isinstance(
element, (timedelta, np.timedelta64, np.int64))
def fillna(self, value, **kwargs):
# allow filling with integers to be
# interpreted as seconds
if is_integer(value) and not isinstance(value, np.timedelta64):
value = Timedelta(value, unit='s')
return super(TimeDeltaBlock, self).fillna(value, **kwargs)
def _try_coerce_args(self, values, other):
"""
Coerce values and other to int64, with null values converted to
iNaT. values is always ndarray-like, other may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, base-type other
"""
values = values.view('i8')
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslibs.iNaT
elif isinstance(other, Timedelta):
other = other.value
elif isinstance(other, timedelta):
other = Timedelta(other).value
elif isinstance(other, np.timedelta64):
other = Timedelta(other).value
elif hasattr(other, 'dtype') and is_timedelta64_dtype(other):
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isna(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslibs.iNaT
elif isinstance(result, (np.integer, np.float)):
result = self._box_func(result)
return result
def should_store(self, value):
return (issubclass(value.dtype.type, np.timedelta64) and
not is_extension_array_dtype(value))
def to_native_types(self, slicer=None, na_rep=None, quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
# FIXME:
# should use the formats.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, np.bool_)
return isinstance(element, (bool, np.bool_))
def should_store(self, value):
return (issubclass(value.dtype.type, np.bool_) and not
is_extension_array_dtype(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True):
inplace = validate_bool_kwarg(inplace, 'inplace')
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex, convert=convert)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, placement=None, ndim=2):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
# TODO: Refactor when convert_objects is removed since there will be 1 path
def convert(self, *args, **kwargs):
""" attempt to coerce any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
if args:
raise NotImplementedError
by_item = True if 'by_item' not in kwargs else kwargs['by_item']
new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta']
new_style = False
for kw in new_inputs:
new_style |= kw in kwargs
if new_style:
fn = soft_convert_objects
fn_inputs = new_inputs
else:
fn = maybe_convert_objects
fn_inputs = ['convert_dates', 'convert_numeric',
'convert_timedeltas']
fn_inputs += ['copy']
fn_kwargs = {}
for key in fn_inputs:
if key in kwargs:
fn_kwargs[key] = kwargs[key]
# operate column-by-column
def f(m, v, i):
shape = v.shape
values = fn(v.ravel(), **fn_kwargs)
try:
values = values.reshape(shape)
values = _block_shape(values, ndim=self.ndim)
except (AttributeError, NotImplementedError):
pass
return values
if by_item and not self._is_single_block:
blocks = self.split_and_operate(None, f, False)
else:
values = f(None, self.values.ravel(), None)
blocks = [make_block(values, ndim=self.ndim,
placement=self.mgr_locs)]
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except (IndexError, ValueError):
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape), dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
return _extend_blocks([b.convert(datetime=True, numeric=False)
for b in blocks])
def _can_hold_element(self, element):
return True
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
if isinstance(other, ABCDatetimeIndex):
# to store DatetimeTZBlock as object
other = other.astype(object).values
return values, other
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or
# TODO(ExtensionArray): remove is_extension_type
# when all extension arrays have been ported.
is_extension_type(value) or
is_extension_array_dtype(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True):
to_rep_is_list = is_list_like(to_replace)
value_is_list = is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
result_blocks = []
blocks = [self]
if not either_list and is_re(to_replace):
return self._replace_single(to_replace, value, inplace=inplace,
filter=filter, regex=True,
convert=convert)
elif not (either_list or regex):
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex,
convert=convert)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex,
convert=convert)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
elif to_rep_is_list and regex:
for to_rep in to_replace:
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, value, inplace=inplace,
filter=filter, regex=regex,
convert=convert)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
return self._replace_single(to_replace, value, inplace=inplace,
filter=filter, convert=convert,
regex=regex)
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mask=None):
"""
Replace elements by the given value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default False
Perform inplace modification.
filter : list, optional
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default True
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
            True indicates that the corresponding element is ignored.
Returns
-------
a new block, the result after replacing
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# to_replace is regex compilable
to_rep_re = regex and is_re_compilable(to_replace)
# regex is regex compilable
regex_re = is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex)
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isna(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here, s can be either a string
# or null if it's null it gets returned
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
if mask is None:
new_values[filt] = f(new_values[filt])
else:
new_values[filt][mask] = f(new_values[filt][mask])
# convert
block = self.make_block(new_values)
if convert:
block = block.convert(by_item=True, numeric=False)
return block
def _replace_coerce(self, to_replace, value, inplace=True, regex=False,
convert=False, mask=None):
"""
Replace value corresponding to the given boolean array with another
value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default False
Perform inplace modification.
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default True
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
            True indicates that the corresponding element is ignored.
Returns
-------
A new block if there is anything to replace or the original block.
"""
if mask.any():
block = super(ObjectBlock, self)._replace_coerce(
to_replace=to_replace, value=value, inplace=inplace,
regex=regex, convert=convert, mask=mask)
if convert:
block = [b.convert(by_item=True, numeric=False, copy=True)
for b in block]
return block
return self
class CategoricalBlock(ExtensionBlock):
__slots__ = ()
is_categorical = True
_verify_integrity = True
_can_hold_na = True
_concatenator = staticmethod(_concat._concat_categorical)
def __init__(self, values, placement, ndim=None):
from pandas.core.arrays.categorical import _maybe_to_categorical
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(_maybe_to_categorical(values),
placement=placement,
ndim=ndim)
@property
def _holder(self):
return Categorical
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return np.object_
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
# GH12564: CategoricalBlock is 1-dim only
# while returned results could be any dim
if ((not is_categorical_dtype(result)) and
isinstance(result, np.ndarray)):
result = _block_shape(result, ndim=self.ndim)
return result
def to_dense(self):
# Categorical.get_values returns a DatetimeIndex for datetime
# categories, so we can't simply use `np.asarray(self.values)` like
# other types.
return self.values.get_values()
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isna(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
Note that this CategoricalBlock._concat_same_type *may* not
return a CategoricalBlock. When the categories in `to_concat`
differ, this will return an object ndarray.
If / when we decide we don't like that behavior:
1. Change Categorical._concat_same_type to use union_categoricals
2. Delete this method.
"""
values = self._concatenator([blk.values for blk in to_concat],
axis=self.ndim - 1)
# not using self.make_block_same_class as values can be object dtype
return make_block(
values, placement=placement or slice(0, len(values), 1),
ndim=self.ndim)
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement, ndim=None):
values = self._maybe_coerce_values(values)
super(DatetimeBlock, self).__init__(values,
placement=placement, ndim=ndim)
def _maybe_coerce_values(self, values):
"""Input validation for values passed to __init__. Ensure that
we have datetime64ns, coercing if necessary.
Parameters
----------
values : array-like
Must be convertible to datetime64
Returns
-------
values : ndarray[datetime64ns]
Overridden by DatetimeTZBlock.
"""
if values.dtype != _NS_DTYPE:
values = conversion.ensure_datetime64ns(values)
return values
def _astype(self, dtype, **kwargs):
"""
these automatically copy, so copy=True has no effect
        raise on an exception if raise == True
"""
# if we are passed a datetime64[ns, tz]
if is_datetime64tz_dtype(dtype):
dtype = DatetimeTZDtype(dtype)
values = self.values
if getattr(values, 'tz', None) is None:
values = DatetimeIndex(values).tz_localize('UTC')
values = values.tz_convert(dtype.tz)
return self.make_block(values)
# delegate
return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs)
def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return tipo == _NS_DTYPE or tipo == np.int64
return (is_integer(element) or isinstance(element, datetime) or
isna(element))
def _try_coerce_args(self, values, other):
"""
Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, base-type other
"""
values = values.view('i8')
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslibs.iNaT
elif isinstance(other, (datetime, np.datetime64, date)):
other = self._box_func(other)
if getattr(other, 'tz') is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a "
"naive Block")
other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and is_datetime64_dtype(other):
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
try:
result = result.astype('M8[ns]')
except ValueError:
pass
elif isinstance(result, (np.integer, np.float, np.datetime64)):
result = self._box_func(result)
return result
@property
def _box_func(self):
return tslibs.Timestamp
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[..., slicer]
from pandas.io.formats.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(
values.view('i8').ravel(), tz=getattr(self.values, 'tz', None),
format=format, na_rep=na_rep).reshape(values.shape)
return np.atleast_2d(result)
def should_store(self, value):
return (issubclass(value.dtype.type, np.datetime64) and
not is_datetimetz(value) and
not is_extension_array_dtype(value))
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = conversion.ensure_datetime64ns(values)
self.values[locs] = values
class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock):
""" implement a datetime64 block with a tz attribute """
__slots__ = ()
_concatenator = staticmethod(_concat._concat_datetime)
is_datetimetz = True
def __init__(self, values, placement, ndim=2, dtype=None):
# XXX: This will end up calling _maybe_coerce_values twice
# when dtype is not None. It's relatively cheap (just an isinstance)
        # but it'd be nice to avoid.
#
# If we can remove dtype from __init__, and push that conversion
# push onto the callers, then we can remove this entire __init__
# and just use DatetimeBlock's.
if dtype is not None:
values = self._maybe_coerce_values(values, dtype=dtype)
super(DatetimeTZBlock, self).__init__(values, placement=placement,
ndim=ndim)
def _maybe_coerce_values(self, values, dtype=None):
"""Input validation for values passed to __init__. Ensure that
we have datetime64TZ, coercing if necessary.
        Parameters
        ----------
values : array-like
Must be convertible to datetime64
dtype : string or DatetimeTZDtype, optional
Does a shallow copy to this tz
Returns
-------
values : ndarray[datetime64ns]
"""
if not isinstance(values, self._holder):
values = self._holder(values)
if dtype is not None:
if isinstance(dtype, compat.string_types):
dtype = DatetimeTZDtype.construct_from_string(dtype)
values = values._shallow_copy(tz=dtype.tz)
if values.tz is None:
raise ValueError("cannot create a DatetimeTZBlock without a tz")
return values
@property
def is_view(self):
""" return a boolean if I am possibly a view """
# check the ndarray values of the DatetimeIndex values
return self.values.values.base is not None
def copy(self, deep=True):
""" copy constructor """
values = self.values
if deep:
values = values.copy(deep=True)
return self.make_block_same_class(values)
def external_values(self):
""" we internally represent the data as a DatetimeIndex, but for
external compat with ndarray, export as a ndarray of Timestamps
"""
return self.values.astype('datetime64[ns]').values
def get_values(self, dtype=None):
# return object dtype as Timestamps with the zones
if is_object_dtype(dtype):
return lib.map_infer(
self.values.ravel(), self._box_func).reshape(self.values.shape)
return self.values
def _slice(self, slicer):
""" return a slice of my values """
if isinstance(slicer, tuple):
col, loc = slicer
if not com.is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
return self.values[slicer]
def _try_coerce_args(self, values, other):
"""
localize and return i8 for the values
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, base-type other
"""
# asi8 is a view, needs copy
values = _block_shape(values.asi8, ndim=self.ndim)
if isinstance(other, ABCSeries):
other = self._holder(other)
if isinstance(other, bool):
raise TypeError
elif (is_null_datelike_scalar(other) or
(lib.is_scalar(other) and isna(other))):
other = tslibs.iNaT
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = _block_shape(other.asi8, ndim=self.ndim)
elif isinstance(other, (np.datetime64, datetime, date)):
other = tslibs.Timestamp(other)
tz = getattr(other, 'tz', None)
# test we can have an equal time zone
if tz is None or str(tz) != str(self.values.tz):
raise ValueError("incompatible or non tz-aware value")
other = other.value
else:
raise TypeError
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.float, np.datetime64)):
result = tslibs.Timestamp(result, tz=self.values.tz)
if isinstance(result, np.ndarray):
# allow passing of > 1dim if its trivial
if result.ndim > 1:
result = result.reshape(np.prod(result.shape))
result = self.values._shallow_copy(result)
return result
@property
def _box_func(self):
return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz)
def shift(self, periods, axis=0):
""" shift the block by periods """
# think about moving this to the DatetimeIndex. This is a non-freq
# (number of periods) shift ###
N = len(self)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.asi8.take(indexer)
if periods > 0:
new_values[:periods] = tslibs.iNaT
else:
new_values[periods:] = tslibs.iNaT
new_values = self.values._shallow_copy(new_values)
return [self.make_block_same_class(new_values,
placement=self.mgr_locs)]
def diff(self, n, axis=0):
"""1st discrete difference
Parameters
----------
n : int, number of periods to diff
axis : int, axis to diff upon. default 0
Return
------
A list with a new TimeDeltaBlock.
Note
----
The arguments here are mimicking shift so they are called correctly
by apply.
"""
if axis == 0:
# Cannot currently calculate diff across multiple blocks since this
# function is invoked via apply
raise NotImplementedError
new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8
# Reshape the new_values like how algos.diff does for timedelta data
new_values = new_values.reshape(1, len(new_values))
new_values = new_values.astype('timedelta64[ns]')
return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._concatenator([blk.values for blk in to_concat],
axis=self.ndim - 1)
# not using self.make_block_same_class as values can be non-tz dtype
return make_block(
values, placement=placement or slice(0, len(values), 1))
# -----------------------------------------------------------------
# Constructor Helpers
def get_block_type(values, dtype=None):
"""
Find the appropriate Block subclass to use for the given values and dtype.
Parameters
----------
values : ndarray-like
dtype : numpy or pandas dtype
Returns
-------
cls : class, subclass of Block
"""
dtype = dtype or values.dtype
vtype = dtype.type
if is_categorical(values):
cls = CategoricalBlock
elif is_extension_array_dtype(values):
cls = ExtensionBlock
elif issubclass(vtype, np.floating):
cls = FloatBlock
elif issubclass(vtype, np.timedelta64):
assert issubclass(vtype, np.integer)
cls = TimeDeltaBlock
elif issubclass(vtype, np.complexfloating):
cls = ComplexBlock
elif issubclass(vtype, np.datetime64):
assert not is_datetimetz(values)
cls = DatetimeBlock
elif is_datetimetz(values):
cls = DatetimeTZBlock
elif issubclass(vtype, np.integer):
cls = IntBlock
elif dtype == np.bool_:
cls = BoolBlock
else:
cls = ObjectBlock
return cls
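# Illustrative sketch (not part of the original module): with plain numpy
# inputs, the dispatch above resolves roughly as follows.
#     get_block_type(np.array([1, 2, 3]))      # -> IntBlock
#     get_block_type(np.array([1.0, 2.0]))     # -> FloatBlock
#     get_block_type(np.array([True, False]))  # -> BoolBlock
#     get_block_type(np.array(['a', 'b']))     # -> ObjectBlock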
def make_block(values, placement, klass=None, ndim=None, dtype=None,
fastpath=None):
if fastpath is not None:
# GH#19265 pyarrow is passing this
warnings.warn("fastpath argument is deprecated, will be removed "
"in a future release.", DeprecationWarning)
if klass is None:
dtype = dtype or values.dtype
klass = get_block_type(values, dtype)
elif klass is DatetimeTZBlock and not is_datetimetz(values):
return klass(values, ndim=ndim,
placement=placement, dtype=dtype)
return klass(values, ndim=ndim, placement=placement)
# -----------------------------------------------------------------
def _extend_blocks(result, blocks=None):
""" return a new extended blocks, givin the result """
from pandas.core.internals import BlockManager
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
if isinstance(r, list):
blocks.extend(r)
else:
blocks.append(r)
elif isinstance(result, BlockManager):
blocks.extend(result.blocks)
else:
blocks.append(result)
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim < ndim:
if shape is None:
shape = values.shape
if not is_extension_array_dtype(values):
# TODO: https://github.com/pandas-dev/pandas/issues/23023
# block.shape is incorrect for "2D" ExtensionArrays
# We can't, and don't need to, reshape.
values = values.reshape(tuple((1, ) + shape))
return values
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len({b.dtype for b in blocks}) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values, placement=new_mgr_locs)
# no merge
return blocks
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
panel_shape = (len(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and minor
# labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
for i in range(len(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _safe_reshape(arr, new_shape):
"""
If possible, reshape `arr` to have shape `new_shape`,
with a couple of exceptions (see gh-13012):
1) If `arr` is a ExtensionArray or Index, `arr` will be
returned as is.
2) If `arr` is a Series, the `_values` attribute will
be reshaped and returned.
Parameters
----------
arr : array-like, object to be reshaped
new_shape : int or tuple of ints, the new shape
"""
if isinstance(arr, ABCSeries):
arr = arr._values
if not isinstance(arr, ABCExtensionArray):
arr = arr.reshape(new_shape)
return arr
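# Illustrative sketch (not part of the original module), assuming numpy/pandas
# objects from user code (pandas imported as pd there):
#     _safe_reshape(np.arange(4), (2, 2))             # -> reshaped 2x2 ndarray
#     _safe_reshape(pd.Series(np.arange(4)), (2, 2))  # reshapes the Series' ._values
#     # ExtensionArray inputs are returned unchanged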
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return ensure_platform_int(
np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
def _putmask_smart(v, m, n):
"""
Return a new ndarray, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
Returns
-------
values : ndarray with updated values
this *may* be a copy of the original
See Also
--------
ndarray.putmask
"""
# we cannot use np.asarray() here as we cannot have conversions
# that numpy does when numeric are mixed with strings
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.repeat(n, len(m))
elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndmin=1), len(m))
    # see if we are only masking values that, if put,
    # will work in the current dtype
try:
nn = n[m]
# make sure that we have a nullable type
# if we have nulls
if not _isna_compat(v, nn[0]):
raise ValueError
# we ignore ComplexWarning here
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", np.ComplexWarning)
nn_at = nn.astype(v.dtype)
# avoid invalid dtype comparisons
# between numbers & strings
# only compare integers/floats
# don't compare integers to datetimelikes
if (not is_numeric_v_string_like(nn, nn_at) and
(is_float_dtype(nn.dtype) or
is_integer_dtype(nn.dtype) and
is_float_dtype(nn_at.dtype) or
is_integer_dtype(nn_at.dtype))):
comp = (nn == nn_at)
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
n = np.asarray(n)
def _putmask_preserve(nv, n):
try:
nv[m] = n[m]
except (IndexError, ValueError):
nv[m] = n
return nv
# preserves dtype if possible
if v.dtype.kind == n.dtype.kind:
return _putmask_preserve(v, n)
# change the dtype if needed
dtype, _ = maybe_promote(n.dtype)
if is_extension_type(v.dtype) and is_object_dtype(dtype):
v = v.get_values(dtype)
else:
v = v.astype(dtype)
return _putmask_preserve(v, n)
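# Illustrative sketch (not part of the original module): the dtype is kept when
# the new values fit, and promoted otherwise (exact reprs depend on the numpy
# version):
#     _putmask_smart(np.array([1, 2, 3]), np.array([False, True, False]), 5)
#     # -> array([1, 5, 3])           (int dtype preserved)
#     _putmask_smart(np.array([1, 2, 3]), np.array([False, True, False]), 2.5)
#     # -> array([1. , 2.5, 3. ])     (upcast to float64)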
| bsd-3-clause |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/backends/backend_pgf.py | 10 | 36343 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import os
import sys
import re
import shutil
import tempfile
import codecs
import atexit
import weakref
import warnings
import matplotlib as mpl
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.figure import Figure
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib import _png, rcParams
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.compat import subprocess
from matplotlib.compat.subprocess import check_output
###############################################################################
# create a list of system fonts, all of these should work with xe/lua-latex
system_fonts = []
if sys.platform.startswith('win'):
from matplotlib import font_manager
from matplotlib.ft2font import FT2Font
for f in font_manager.win32InstalledFonts():
try:
system_fonts.append(FT2Font(str(f)).family_name)
except:
pass # unknown error, skip this font
else:
# assuming fontconfig is installed and the command 'fc-list' exists
try:
# list scalable (non-bitmap) fonts
fc_list = check_output(['fc-list', ':outline,scalable', 'family'])
fc_list = fc_list.decode('utf8')
system_fonts = [f.split(',')[0] for f in fc_list.splitlines()]
system_fonts = list(set(system_fonts))
except:
warnings.warn('error getting fonts from fc-list', UserWarning)
def get_texcommand():
"""Get chosen TeX system from rc."""
texsystem_options = ["xelatex", "lualatex", "pdflatex"]
texsystem = rcParams.get("pgf.texsystem", "xelatex")
return texsystem if texsystem in texsystem_options else "xelatex"
def get_fontspec():
"""Build fontspec preamble from rc."""
latex_fontspec = []
texcommand = get_texcommand()
if texcommand != "pdflatex":
latex_fontspec.append("\\usepackage{fontspec}")
if texcommand != "pdflatex" and rcParams.get("pgf.rcfonts", True):
# try to find fonts from rc parameters
families = ["serif", "sans-serif", "monospace"]
fontspecs = [r"\setmainfont{%s}", r"\setsansfont{%s}",
r"\setmonofont{%s}"]
for family, fontspec in zip(families, fontspecs):
matches = [f for f in rcParams["font." + family]
if f in system_fonts]
if matches:
latex_fontspec.append(fontspec % matches[0])
else:
                pass # no fonts found, fall back to the LaTeX default
return "\n".join(latex_fontspec)
def get_preamble():
"""Get LaTeX preamble from rc."""
latex_preamble = rcParams.get("pgf.preamble", "")
if type(latex_preamble) == list:
latex_preamble = "\n".join(latex_preamble)
return latex_preamble
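# Illustrative configuration sketch (not part of the original module): the three
# helpers above read the "pgf.*" rc settings, so user code would typically do
# something like
#     import matplotlib as mpl
#     mpl.use("pgf")
#     mpl.rcParams.update({
#         "pgf.texsystem": "lualatex",               # consumed by get_texcommand()
#         "pgf.rcfonts": False,                      # disables the fontspec auto-setup
#         "pgf.preamble": [r"\usepackage{amsmath}"], # returned by get_preamble()
#     })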
###############################################################################
# This almost made me cry!!!
# In the end, it's better to use only one unit for all coordinates, since the
# arithmetic in latex seems to produce inaccurate conversions.
latex_pt_to_in = 1. / 72.27
latex_in_to_pt = 1. / latex_pt_to_in
mpl_pt_to_in = 1. / 72.
mpl_in_to_pt = 1. / mpl_pt_to_in
###############################################################################
# helper functions
NO_ESCAPE = r"(?<!\\)(?:\\\\)*"
re_mathsep = re.compile(NO_ESCAPE + r"\$")
re_escapetext = re.compile(NO_ESCAPE + "([_^$%])")
repl_escapetext = lambda m: "\\" + m.group(1)
re_mathdefault = re.compile(NO_ESCAPE + r"(\\mathdefault)")
repl_mathdefault = lambda m: m.group(0)[:-len(m.group(1))]
def common_texification(text):
"""
Do some necessary and/or useful substitutions for texts to be included in
LaTeX documents.
"""
# Sometimes, matplotlib adds the unknown command \mathdefault.
# Not using \mathnormal instead since this looks odd for the latex cm font.
text = re_mathdefault.sub(repl_mathdefault, text)
# split text into normaltext and inline math parts
parts = re_mathsep.split(text)
for i, s in enumerate(parts):
if not i % 2:
# textmode replacements
s = re_escapetext.sub(repl_escapetext, s)
else:
# mathmode replacements
s = r"\(\displaystyle %s\)" % s
parts[i] = s
return "".join(parts)
def writeln(fh, line):
# every line of a file included with \input must be terminated with %
# if not, latex will create additional vertical spaces for some reason
fh.write(line)
fh.write("%\n")
def _font_properties_str(prop):
# translate font properties to latex commands, return as string
commands = []
families = {"serif": r"\rmfamily", "sans": r"\sffamily",
"sans-serif": r"\sffamily", "monospace": r"\ttfamily"}
family = prop.get_family()[0]
if family in families:
commands.append(families[family])
elif family in system_fonts and get_texcommand() != "pdflatex":
commands.append(r"\setmainfont{%s}\rmfamily" % family)
else:
pass # print warning?
size = prop.get_size_in_points()
commands.append(r"\fontsize{%f}{%f}" % (size, size * 1.2))
styles = {"normal": r"", "italic": r"\itshape", "oblique": r"\slshape"}
commands.append(styles[prop.get_style()])
boldstyles = ["semibold", "demibold", "demi", "bold", "heavy",
"extra bold", "black"]
if prop.get_weight() in boldstyles:
commands.append(r"\bfseries")
commands.append(r"\selectfont")
return "".join(commands)
def make_pdf_to_png_converter():
"""
Returns a function that converts a pdf file to a png file.
"""
tools_available = []
# check for pdftocairo
try:
check_output(["pdftocairo", "-v"], stderr=subprocess.STDOUT)
tools_available.append("pdftocairo")
except:
pass
# check for ghostscript
gs, ver = mpl.checkdep_ghostscript()
if gs:
tools_available.append("gs")
# pick converter
if "pdftocairo" in tools_available:
def cairo_convert(pdffile, pngfile, dpi):
cmd = ["pdftocairo", "-singlefile", "-png",
"-r %d" % dpi, pdffile, os.path.splitext(pngfile)[0]]
# for some reason this doesn't work without shell
check_output(" ".join(cmd), shell=True, stderr=subprocess.STDOUT)
return cairo_convert
elif "gs" in tools_available:
def gs_convert(pdffile, pngfile, dpi):
cmd = [gs, '-dQUIET', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT',
'-sDEVICE=png16m', '-dUseCIEColor', '-dTextAlphaBits=4',
'-dGraphicsAlphaBits=4', '-dDOINTERPOLATE', '-sOutputFile=%s' % pngfile,
'-r%d' % dpi, pdffile]
check_output(cmd, stderr=subprocess.STDOUT)
return gs_convert
else:
raise RuntimeError("No suitable pdf to png renderer found.")
class LatexError(Exception):
def __init__(self, message, latex_output=""):
Exception.__init__(self, message)
self.latex_output = latex_output
class LatexManagerFactory:
previous_instance = None
@staticmethod
def get_latex_manager():
texcommand = get_texcommand()
latex_header = LatexManager._build_latex_header()
prev = LatexManagerFactory.previous_instance
# check if the previous instance of LatexManager can be reused
if prev and prev.latex_header == latex_header and prev.texcommand == texcommand:
if rcParams.get("pgf.debug", False):
print("reusing LatexManager")
return prev
else:
if rcParams.get("pgf.debug", False):
print("creating LatexManager")
new_inst = LatexManager()
LatexManagerFactory.previous_instance = new_inst
return new_inst
class WeakSet:
# TODO: Poor man's weakref.WeakSet.
# Remove this once python 2.6 support is dropped from matplotlib.
def __init__(self):
self.weak_key_dict = weakref.WeakKeyDictionary()
def add(self, item):
self.weak_key_dict[item] = None
def discard(self, item):
if item in self.weak_key_dict:
del self.weak_key_dict[item]
def __iter__(self):
return six.iterkeys(self.weak_key_dict)
class LatexManager:
"""
The LatexManager opens an instance of the LaTeX application for
determining the metrics of text elements. The LaTeX environment can be
    modified by setting fonts and/or a custom preamble in the rc parameters.
"""
_unclean_instances = WeakSet()
@staticmethod
def _build_latex_header():
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
# Create LaTeX header with some content, else LaTeX will load some
# math fonts later when we don't expect the additional output on stdout.
# TODO: is this sufficient?
latex_header = [r"\documentclass{minimal}",
latex_preamble,
latex_fontspec,
r"\begin{document}",
r"text $math \mu$", # force latex to load fonts now
r"\typeout{pgf_backend_query_start}"]
return "\n".join(latex_header)
@staticmethod
def _cleanup_remaining_instances():
unclean_instances = list(LatexManager._unclean_instances)
for latex_manager in unclean_instances:
latex_manager._cleanup()
def _stdin_writeln(self, s):
self.latex_stdin_utf8.write(s)
self.latex_stdin_utf8.write("\n")
self.latex_stdin_utf8.flush()
def _expect(self, s):
exp = s.encode("utf8")
buf = bytearray()
while True:
b = self.latex.stdout.read(1)
buf += b
if buf[-len(exp):] == exp:
break
if not len(b):
raise LatexError("LaTeX process halted", buf.decode("utf8"))
return buf.decode("utf8")
def _expect_prompt(self):
return self._expect("\n*")
def __init__(self):
# create a tmp directory for running latex, remember to cleanup
self.tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_lm_")
LatexManager._unclean_instances.add(self)
# test the LaTeX setup to ensure a clean startup of the subprocess
self.texcommand = get_texcommand()
self.latex_header = LatexManager._build_latex_header()
latex_end = "\n\\makeatletter\n\\@@end\n"
try:
latex = subprocess.Popen([self.texcommand, "-halt-on-error"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self.tmpdir)
except OSError:
raise RuntimeError("Error starting process '%s'" % self.texcommand)
test_input = self.latex_header + latex_end
stdout, stderr = latex.communicate(test_input.encode("utf-8"))
if latex.returncode != 0:
raise LatexError("LaTeX returned an error, probably missing font or error in preamble:\n%s" % stdout)
# open LaTeX process for real work
latex = subprocess.Popen([self.texcommand, "-halt-on-error"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
cwd=self.tmpdir)
self.latex = latex
self.latex_stdin_utf8 = codecs.getwriter("utf8")(self.latex.stdin)
# write header with 'pgf_backend_query_start' token
self._stdin_writeln(self._build_latex_header())
# read all lines until our 'pgf_backend_query_start' token appears
self._expect("*pgf_backend_query_start")
self._expect_prompt()
# cache for strings already processed
self.str_cache = {}
def _cleanup(self):
if not os.path.isdir(self.tmpdir):
return
try:
self.latex_stdin_utf8.close()
self.latex.communicate()
self.latex.wait()
except:
pass
try:
shutil.rmtree(self.tmpdir)
LatexManager._unclean_instances.discard(self)
except:
sys.stderr.write("error deleting tmp directory %s\n" % self.tmpdir)
def __del__(self):
if rcParams.get("pgf.debug", False):
print("deleting LatexManager")
self._cleanup()
def get_width_height_descent(self, text, prop):
"""
Get the width, total height and descent for a text typesetted by the
current LaTeX environment.
"""
# apply font properties and define textbox
prop_cmds = _font_properties_str(prop)
textbox = "\\sbox0{%s %s}" % (prop_cmds, text)
# check cache
if textbox in self.str_cache:
return self.str_cache[textbox]
# send textbox to LaTeX and wait for prompt
self._stdin_writeln(textbox)
try:
self._expect_prompt()
except LatexError as e:
msg = "Error processing '%s'\nLaTeX Output:\n%s"
raise ValueError(msg % (text, e.latex_output))
# typeout width, height and text offset of the last textbox
self._stdin_writeln(r"\typeout{\the\wd0,\the\ht0,\the\dp0}")
# read answer from latex and advance to the next prompt
try:
answer = self._expect_prompt()
except LatexError as e:
msg = "Error processing '%s'\nLaTeX Output:\n%s"
raise ValueError(msg % (text, e.latex_output))
# parse metrics from the answer string
try:
width, height, offset = answer.splitlines()[0].split(",")
except:
msg = "Error processing '%s'\nLaTeX Output:\n%s" % (text, answer)
raise ValueError(msg)
w, h, o = float(width[:-2]), float(height[:-2]), float(offset[:-2])
# the height returned from LaTeX goes from base to top.
# the height matplotlib expects goes from bottom to top.
self.str_cache[textbox] = (w, h + o, o)
return w, h + o, o
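# Illustrative usage sketch (not part of the original module): the manager is
# normally obtained via LatexManagerFactory so that a running LaTeX process is
# reused between renderers.
#     from matplotlib.font_manager import FontProperties
#     manager = LatexManagerFactory.get_latex_manager()
#     w, h, d = manager.get_width_height_descent(r"a $\mu$ label",
#                                                FontProperties(size=10))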
class RendererPgf(RendererBase):
def __init__(self, figure, fh, dummy=False):
"""
Creates a new PGF renderer that translates any drawing instruction
into text commands to be interpreted in a latex pgfpicture environment.
Attributes:
* figure: Matplotlib figure to initialize height, width and dpi from.
* fh: File handle for the output of the drawing commands.
"""
RendererBase.__init__(self)
self.dpi = figure.dpi
self.fh = fh
self.figure = figure
self.image_counter = 0
# get LatexManager instance
self.latexManager = LatexManagerFactory.get_latex_manager()
if dummy:
# dummy==True deactivate all methods
nop = lambda *args, **kwargs: None
for m in RendererPgf.__dict__.keys():
if m.startswith("draw_"):
self.__dict__[m] = nop
else:
# if fh does not belong to a filename, deactivate draw_image
if not os.path.exists(fh.name):
self.__dict__["draw_image"] = lambda *args, **kwargs: None
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
writeln(self.fh, r"\begin{pgfscope}")
# convert from display units to in
f = 1. / self.dpi
# set style and clip
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
# build marker definition
bl, tr = marker_path.get_extents(marker_trans).get_points()
coords = bl[0] * f, bl[1] * f, tr[0] * f, tr[1] * f
writeln(self.fh, r"\pgfsys@defobject{currentmarker}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{" % coords)
self._print_pgf_path(None, marker_path, marker_trans)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
writeln(self.fh, r"}")
# draw marker for each vertex
for point, code in path.iter_segments(trans, simplify=False):
x, y = point[0] * f, point[1] * f
writeln(self.fh, r"\begin{pgfscope}")
writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (x, y))
writeln(self.fh, r"\pgfsys@useobject{currentmarker}{}")
writeln(self.fh, r"\end{pgfscope}")
writeln(self.fh, r"\end{pgfscope}")
def draw_path(self, gc, path, transform, rgbFace=None):
writeln(self.fh, r"\begin{pgfscope}")
# draw the path
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
self._print_pgf_path(gc, path, transform)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
writeln(self.fh, r"\end{pgfscope}")
# if present, draw pattern on top
if gc.get_hatch():
writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_path_styles(gc, rgbFace)
# combine clip and path for clipping
self._print_pgf_clip(gc)
self._print_pgf_path(gc, path, transform)
writeln(self.fh, r"\pgfusepath{clip}")
# build pattern definition
writeln(self.fh, r"\pgfsys@defobject{currentpattern}{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}{")
writeln(self.fh, r"\begin{pgfscope}")
writeln(self.fh, r"\pgfpathrectangle{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}")
writeln(self.fh, r"\pgfusepath{clip}")
scale = mpl.transforms.Affine2D().scale(self.dpi)
self._print_pgf_path(None, gc.get_hatch_path(), scale)
self._pgf_path_draw(stroke=True)
writeln(self.fh, r"\end{pgfscope}")
writeln(self.fh, r"}")
# repeat pattern, filling the bounding rect of the path
f = 1. / self.dpi
(xmin, ymin), (xmax, ymax) = path.get_extents(transform).get_points()
xmin, xmax = f * xmin, f * xmax
ymin, ymax = f * ymin, f * ymax
repx, repy = int(math.ceil(xmax-xmin)), int(math.ceil(ymax-ymin))
writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (xmin, ymin))
for iy in range(repy):
for ix in range(repx):
writeln(self.fh, r"\pgfsys@useobject{currentpattern}{}")
writeln(self.fh, r"\pgfsys@transformshift{1in}{0in}")
writeln(self.fh, r"\pgfsys@transformshift{-%din}{0in}" % repx)
writeln(self.fh, r"\pgfsys@transformshift{0in}{1in}")
writeln(self.fh, r"\end{pgfscope}")
def _print_pgf_clip(self, gc):
f = 1. / self.dpi
# check for clip box
bbox = gc.get_clip_rectangle()
if bbox:
p1, p2 = bbox.get_points()
w, h = p2 - p1
coords = p1[0] * f, p1[1] * f, w * f, h * f
writeln(self.fh, r"\pgfpathrectangle{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}} " % coords)
writeln(self.fh, r"\pgfusepath{clip}")
# check for clip path
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
self._print_pgf_path(gc, clippath, clippath_trans)
writeln(self.fh, r"\pgfusepath{clip}")
def _print_pgf_path_styles(self, gc, rgbFace):
# cap style
capstyles = {"butt": r"\pgfsetbuttcap",
"round": r"\pgfsetroundcap",
"projecting": r"\pgfsetrectcap"}
writeln(self.fh, capstyles[gc.get_capstyle()])
# join style
joinstyles = {"miter": r"\pgfsetmiterjoin",
"round": r"\pgfsetroundjoin",
"bevel": r"\pgfsetbeveljoin"}
writeln(self.fh, joinstyles[gc.get_joinstyle()])
# filling
has_fill = rgbFace is not None
if gc.get_forced_alpha():
fillopacity = strokeopacity = gc.get_alpha()
else:
strokeopacity = gc.get_rgb()[3]
fillopacity = rgbFace[3] if has_fill and len(rgbFace) > 3 else 1.0
if has_fill:
writeln(self.fh, r"\definecolor{currentfill}{rgb}{%f,%f,%f}" % tuple(rgbFace[:3]))
writeln(self.fh, r"\pgfsetfillcolor{currentfill}")
if has_fill and fillopacity != 1.0:
writeln(self.fh, r"\pgfsetfillopacity{%f}" % fillopacity)
# linewidth and color
lw = gc.get_linewidth() * mpl_pt_to_in * latex_in_to_pt
stroke_rgba = gc.get_rgb()
writeln(self.fh, r"\pgfsetlinewidth{%fpt}" % lw)
writeln(self.fh, r"\definecolor{currentstroke}{rgb}{%f,%f,%f}" % stroke_rgba[:3])
writeln(self.fh, r"\pgfsetstrokecolor{currentstroke}")
if strokeopacity != 1.0:
writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % strokeopacity)
# line style
dash_offset, dash_list = gc.get_dashes()
if dash_list is None:
writeln(self.fh, r"\pgfsetdash{}{0pt}")
else:
dash_str = r"\pgfsetdash{"
for dash in dash_list:
dash_str += r"{%fpt}" % dash
dash_str += r"}{%fpt}" % dash_offset
writeln(self.fh, dash_str)
def _print_pgf_path(self, gc, path, transform):
f = 1. / self.dpi
# check for clip box
bbox = gc.get_clip_rectangle() if gc else None
if bbox:
p1, p2 = bbox.get_points()
clip = (p1[0], p1[1], p2[0], p2[1])
else:
clip = None
# build path
for points, code in path.iter_segments(transform, clip=clip):
if code == Path.MOVETO:
x, y = tuple(points)
writeln(self.fh, r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CLOSEPOLY:
writeln(self.fh, r"\pgfpathclose")
elif code == Path.LINETO:
x, y = tuple(points)
writeln(self.fh, r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CURVE3:
cx, cy, px, py = tuple(points)
coords = cx * f, cy * f, px * f, py * f
writeln(self.fh, r"\pgfpathquadraticcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
elif code == Path.CURVE4:
c1x, c1y, c2x, c2y, px, py = tuple(points)
coords = c1x * f, c1y * f, c2x * f, c2y * f, px * f, py * f
writeln(self.fh, r"\pgfpathcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
def _pgf_path_draw(self, stroke=True, fill=False):
actions = []
if stroke:
actions.append("stroke")
if fill:
actions.append("fill")
writeln(self.fh, r"\pgfusepath{%s}" % ",".join(actions))
def draw_image(self, gc, x, y, im):
# TODO: Almost no documentation for the behavior of this function.
# Something missing?
# save the images to png files
path = os.path.dirname(self.fh.name)
fname = os.path.splitext(os.path.basename(self.fh.name))[0]
fname_img = "%s-img%d.png" % (fname, self.image_counter)
self.image_counter += 1
im.flipud_out()
rows, cols, buf = im.as_rgba_str()
_png.write_png(buf, cols, rows, os.path.join(path, fname_img))
# reference the image in the pgf picture
writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_clip(gc)
h, w = im.get_size_out()
f = 1. / self.dpi # from display coords to inch
writeln(self.fh, r"\pgftext[at=\pgfqpoint{%fin}{%fin},left,bottom]{\pgfimage[interpolate=true,width=%fin,height=%fin]{%s}}" % (x * f, y * f, w * f, h * f, fname_img))
writeln(self.fh, r"\end{pgfscope}")
def draw_tex(self, gc, x, y, s, prop, angle, ismath="TeX!", mtext=None):
self.draw_text(gc, x, y, s, prop, angle, ismath, mtext)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# prepare string for tex
s = common_texification(s)
prop_cmds = _font_properties_str(prop)
s = r"%s %s" % (prop_cmds, s)
writeln(self.fh, r"\begin{pgfscope}")
alpha = gc.get_alpha()
if alpha != 1.0:
writeln(self.fh, r"\pgfsetfillopacity{%f}" % alpha)
writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % alpha)
rgb = tuple(gc.get_rgb())[:3]
if rgb != (0, 0, 0):
writeln(self.fh, r"\definecolor{textcolor}{rgb}{%f,%f,%f}" % rgb)
writeln(self.fh, r"\pgfsetstrokecolor{textcolor}")
writeln(self.fh, r"\pgfsetfillcolor{textcolor}")
s = r"\color{textcolor}" + s
f = 1.0 / self.figure.dpi
text_args = []
if mtext and (angle == 0 or mtext.get_rotation_mode() == "anchor"):
# if text anchoring can be supported, get the original coordinates
# and add alignment information
x, y = mtext.get_transform().transform_point(mtext.get_position())
text_args.append("x=%fin" % (x * f))
text_args.append("y=%fin" % (y * f))
halign = {"left": "left", "right": "right", "center": ""}
valign = {"top": "top", "bottom": "bottom",
"baseline": "base", "center": ""}
text_args.append(halign[mtext.get_ha()])
text_args.append(valign[mtext.get_va()])
else:
# if not, use the text layout provided by matplotlib
text_args.append("x=%fin" % (x * f))
text_args.append("y=%fin" % (y * f))
text_args.append("left")
text_args.append("base")
if angle != 0:
text_args.append("rotate=%f" % angle)
writeln(self.fh, r"\pgftext[%s]{%s}" % (",".join(text_args), s))
writeln(self.fh, r"\end{pgfscope}")
def get_text_width_height_descent(self, s, prop, ismath):
# check if the math is supposed to be displaystyled
s = common_texification(s)
# get text metrics in units of latex pt, convert to display units
w, h, d = self.latexManager.get_width_height_descent(s, prop)
# TODO: this should be latex_pt_to_in instead of mpl_pt_to_in
# but having a little bit more space around the text looks better,
# plus the bounding box reported by LaTeX is VERY narrow
f = mpl_pt_to_in * self.dpi
return w * f, h * f, d * f
def flipy(self):
return False
def get_canvas_width_height(self):
return self.figure.get_figwidth(), self.figure.get_figheight()
def points_to_pixels(self, points):
return points * mpl_pt_to_in * self.dpi
def new_gc(self):
return GraphicsContextPgf()
class GraphicsContextPgf(GraphicsContextBase):
pass
########################################################################
def draw_if_interactive():
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPgf(figure)
manager = FigureManagerPgf(canvas, num)
return manager
class TmpDirCleaner:
remaining_tmpdirs = set()
@staticmethod
def add(tmpdir):
TmpDirCleaner.remaining_tmpdirs.add(tmpdir)
@staticmethod
def cleanup_remaining_tmpdirs():
for tmpdir in TmpDirCleaner.remaining_tmpdirs:
try:
shutil.rmtree(tmpdir)
except:
sys.stderr.write("error deleting tmp directory %s\n" % tmpdir)
class FigureCanvasPgf(FigureCanvasBase):
filetypes = {"pgf": "LaTeX PGF picture",
"pdf": "LaTeX compiled PGF picture",
"png": "Portable Network Graphics", }
def __init__(self, *args):
FigureCanvasBase.__init__(self, *args)
def get_default_filetype(self):
return 'pdf'
def _print_pgf_to_fh(self, fh, *args, **kwargs):
if kwargs.get("dryrun", False):
renderer = RendererPgf(self.figure, None, dummy=True)
self.figure.draw(renderer)
return
header_text = """%% Creator: Matplotlib, PGF backend
%%
%% To include the figure in your LaTeX document, write
%% \\input{<filename>.pgf}
%%
%% Make sure the required packages are loaded in your preamble
%% \\usepackage{pgf}
%%
%% Figures using additional raster images can only be included by \\input if
%% they are in the same directory as the main LaTeX file. For loading figures
%% from other directories you can use the `import` package
%% \\usepackage{import}
%% and then include the figures with
%% \\import{<path to file>}{<filename>.pgf}
%%
"""
# append the preamble used by the backend as a comment for debugging
header_info_preamble = ["%% Matplotlib used the following preamble"]
for line in get_preamble().splitlines():
header_info_preamble.append("%% " + line)
for line in get_fontspec().splitlines():
header_info_preamble.append("%% " + line)
header_info_preamble.append("%%")
header_info_preamble = "\n".join(header_info_preamble)
# get figure size in inch
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
dpi = self.figure.get_dpi()
# create pgfpicture environment and write the pgf code
fh.write(header_text)
fh.write(header_info_preamble)
fh.write("\n")
writeln(fh, r"\begingroup")
writeln(fh, r"\makeatletter")
writeln(fh, r"\begin{pgfpicture}")
writeln(fh, r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}" % (w, h))
writeln(fh, r"\pgfusepath{use as bounding box, clip}")
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(self.figure, w, h, dpi,
RendererPgf(self.figure, fh),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
# end the pgfpicture environment
writeln(fh, r"\end{pgfpicture}")
writeln(fh, r"\makeatother")
writeln(fh, r"\endgroup")
def print_pgf(self, fname_or_fh, *args, **kwargs):
"""
Output pgf commands for drawing the figure so it can be included and
rendered in latex documents.
"""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
# figure out where the pgf is to be written to
if is_string_like(fname_or_fh):
with codecs.open(fname_or_fh, "w", encoding="utf-8") as fh:
self._print_pgf_to_fh(fh, *args, **kwargs)
elif is_writable_file_like(fname_or_fh):
if not os.path.exists(fname_or_fh.name):
warnings.warn("streamed pgf-code does not support raster "
"graphics, consider using the pgf-to-pdf option",
UserWarning)
self._print_pgf_to_fh(fname_or_fh, *args, **kwargs)
else:
raise ValueError("filename must be a path")
def _print_pdf_to_fh(self, fh, *args, **kwargs):
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
try:
# create temporary directory for compiling the figure
tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
fname_pgf = os.path.join(tmpdir, "figure.pgf")
fname_tex = os.path.join(tmpdir, "figure.tex")
fname_pdf = os.path.join(tmpdir, "figure.pdf")
# print figure to pgf and compile it with latex
self.print_pgf(fname_pgf, *args, **kwargs)
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
latexcode = """
\\documentclass[12pt]{minimal}
\\usepackage[paperwidth=%fin, paperheight=%fin, margin=0in]{geometry}
%s
%s
\\usepackage{pgf}
\\begin{document}
\\centering
\\input{figure.pgf}
\\end{document}""" % (w, h, latex_preamble, latex_fontspec)
with codecs.open(fname_tex, "w", "utf-8") as fh_tex:
fh_tex.write(latexcode)
texcommand = get_texcommand()
cmdargs = [texcommand, "-interaction=nonstopmode",
"-halt-on-error", "figure.tex"]
try:
check_output(cmdargs, stderr=subprocess.STDOUT, cwd=tmpdir)
except subprocess.CalledProcessError as e:
raise RuntimeError("%s was not able to process your file.\n\nFull log:\n%s" % (texcommand, e.output))
# copy file contents to target
with open(fname_pdf, "rb") as fh_src:
shutil.copyfileobj(fh_src, fh)
finally:
try:
shutil.rmtree(tmpdir)
except:
TmpDirCleaner.add(tmpdir)
def print_pdf(self, fname_or_fh, *args, **kwargs):
"""
Use LaTeX to compile a Pgf generated figure to PDF.
"""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
# figure out where the pdf is to be written to
if is_string_like(fname_or_fh):
with open(fname_or_fh, "wb") as fh:
self._print_pdf_to_fh(fh, *args, **kwargs)
elif is_writable_file_like(fname_or_fh):
self._print_pdf_to_fh(fname_or_fh, *args, **kwargs)
else:
raise ValueError("filename must be a path or a file-like object")
def _print_png_to_fh(self, fh, *args, **kwargs):
converter = make_pdf_to_png_converter()
try:
# create temporary directory for pdf creation and png conversion
tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
fname_pdf = os.path.join(tmpdir, "figure.pdf")
fname_png = os.path.join(tmpdir, "figure.png")
# create pdf and try to convert it to png
self.print_pdf(fname_pdf, *args, **kwargs)
converter(fname_pdf, fname_png, dpi=self.figure.dpi)
# copy file contents to target
with open(fname_png, "rb") as fh_src:
shutil.copyfileobj(fh_src, fh)
finally:
try:
shutil.rmtree(tmpdir)
except:
TmpDirCleaner.add(tmpdir)
def print_png(self, fname_or_fh, *args, **kwargs):
"""
Use LaTeX to compile a pgf figure to pdf and convert it to png.
"""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
if is_string_like(fname_or_fh):
with open(fname_or_fh, "wb") as fh:
self._print_png_to_fh(fh, *args, **kwargs)
elif is_writable_file_like(fname_or_fh):
self._print_png_to_fh(fname_or_fh, *args, **kwargs)
else:
raise ValueError("filename must be a path or a file-like object")
def get_renderer(self):
return RendererPgf(self.figure, None, dummy=True)
class FigureManagerPgf(FigureManagerBase):
def __init__(self, *args):
FigureManagerBase.__init__(self, *args)
FigureCanvas = FigureCanvasPgf
FigureManager = FigureManagerPgf
def _cleanup_all():
LatexManager._cleanup_remaining_instances()
TmpDirCleaner.cleanup_remaining_tmpdirs()
atexit.register(_cleanup_all)
| mit |
OshynSong/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:
* scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
gplepage/lsqfit | doc/source/case-outliers.py | 1 | 4312 | from __future__ import print_function
import tee
import sys
STDOUT = sys.stdout
# NB: Need to run cases (True, False), (False, False) and (False, True)
LSQFIT_ONLY = False
MULTI_W = True
import matplotlib.pyplot as plt
import numpy as np
import gvar as gv
import lsqfit
def main():
### 1) least-squares fit to the data
x = np.array([
0.2, 0.4, 0.6, 0.8, 1.,
1.2, 1.4, 1.6, 1.8, 2.,
2.2, 2.4, 2.6, 2.8, 3.,
3.2, 3.4, 3.6, 3.8
])
y = gv.gvar([
'0.38(20)', '2.89(20)', '0.85(20)', '0.59(20)', '2.88(20)',
'1.44(20)', '0.73(20)', '1.23(20)', '1.68(20)', '1.36(20)',
'1.51(20)', '1.73(20)', '2.16(20)', '1.85(20)', '2.00(20)',
'2.11(20)', '2.75(20)', '0.86(20)', '2.73(20)'
])
prior = make_prior()
fit = lsqfit.nonlinear_fit(data=(x, y), prior=prior, fcn=fitfcn)
if LSQFIT_ONLY:
sys.stdout = tee.tee(STDOUT, open('case-outliers-lsq.out', 'w'))
elif not MULTI_W:
sys.stdout = tee.tee(STDOUT, open('case-outliers.out', 'w'))
print(fit)
# plot data
plt.errorbar(x, gv.mean(y), gv.sdev(y), fmt='o', c='b')
# plot fit function
xline = np.linspace(x[0], x[-1], 100)
yline = fitfcn(xline, fit.p)
plt.plot(xline, gv.mean(yline), 'k:')
yp = gv.mean(yline) + gv.sdev(yline)
ym = gv.mean(yline) - gv.sdev(yline)
plt.fill_between(xline, yp, ym, color='0.8')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig('case-outliers1.png', bbox_inches='tight')
if LSQFIT_ONLY:
return
### 2) Bayesian integral with modified PDF
pdf = ModifiedPDF(data=(x, y), fcn=fitfcn, prior=prior)
# integrator for expectation values with modified PDF
expval = lsqfit.BayesIntegrator(fit, pdf=pdf)
# adapt integrator to pdf
expval(neval=1000, nitn=15)
# evaluate expectation value of g(p)
def g(p):
w = p['w']
c = p['c']
return dict(w=[w, w**2], mean=c, outer=np.outer(c,c))
results = expval(g, neval=1000, nitn=15, adapt=False)
print(results.summary())
# expval.map.show_grid(15)
if MULTI_W:
sys.stdout = tee.tee(STDOUT, open('case-outliers-multi.out', 'w'))
# parameters c[i]
mean = results['mean']
cov = results['outer'] - np.outer(mean, mean)
c = mean + gv.gvar(np.zeros(mean.shape), gv.mean(cov))
print('c =', c)
print(
'corr(c) =',
np.array2string(gv.evalcorr(c), prefix=10 * ' '),
'\n',
)
# parameter w
wmean, w2mean = results['w']
wsdev = gv.mean(w2mean - wmean ** 2) ** 0.5
w = wmean + gv.gvar(np.zeros(np.shape(wmean)), wsdev)
print('w =', w, '\n')
# Bayes Factor
print('logBF =', np.log(results.norm))
sys.stdout = STDOUT
if MULTI_W:
return
# add new fit to plot
yline = fitfcn(xline, dict(c=c))
plt.plot(xline, gv.mean(yline), 'r--')
yp = gv.mean(yline) + gv.sdev(yline)
ym = gv.mean(yline) - gv.sdev(yline)
plt.fill_between(xline, yp, ym, color='r', alpha=0.2)
plt.savefig('case-outliers2.png', bbox_inches='tight')
# plt.show()
class ModifiedPDF:
""" Modified PDF to account for measurement failure. """
def __init__(self, data, fcn, prior):
self.x, self.y = data
self.fcn = fcn
self.prior = prior
def __call__(self, p):
w = p['w']
y_fx = self.y - self.fcn(self.x, p)
data_pdf1 = self.gaussian_pdf(y_fx, 1.)
data_pdf2 = self.gaussian_pdf(y_fx, 10.)
prior_pdf = self.gaussian_pdf(
p.buf[:len(self.prior.buf)] - self.prior.buf
)
return np.prod((1. - w) * data_pdf1 + w * data_pdf2) * np.prod(prior_pdf)
@staticmethod
def gaussian_pdf(x, f=1.):
xmean = gv.mean(x)
xvar = gv.var(x) * f ** 2
return gv.exp(-xmean ** 2 / 2. /xvar) / gv.sqrt(2 * np.pi * xvar)
def fitfcn(x, p):
c = p['c']
return c[0] + c[1] * x #** c[2]
def make_prior():
prior = gv.BufferDict(c=gv.gvar(['0(5)', '0(5)']))
if LSQFIT_ONLY:
return prior
if MULTI_W:
prior['unif(w)'] = gv.BufferDict.uniform('unif', 0., 1., shape=19)
else:
prior['unif(w)'] = gv.BufferDict.uniform('unif', 0., 1.)
return prior
if __name__ == '__main__':
gv.ranseed([12345])
main() | gpl-3.0 |
codevlabs/pandashells | pandashells/lib/arg_lib.py | 7 | 6681 | from pandashells.lib import config_lib
def _check_for_recognized_args(*args):
"""
Raise an error if unrecognized argset is specified
"""
allowed_arg_set = set([
'io_in',
'io_out',
'example',
'xy_plotting',
'decorating',
])
in_arg_set = set(args)
unrecognized_set = in_arg_set - allowed_arg_set
if unrecognized_set:
msg = '{} not in allowed set {}'.format(unrecognized_set,
allowed_arg_set)
raise ValueError(msg)
def _io_in_adder(parser, config_dict, *args):
"""
Add input options to the parser
"""
in_arg_set = set(args)
if 'io_in' in in_arg_set:
group = parser.add_argument_group('Input Options')
# define the valid components
io_opt_list = ['csv', 'table', 'header', 'noheader']
# allow the option of supplying input column names
msg = 'Overwrite input column names with this list'
group.add_argument(
'--names', nargs='+', type=str, dest='names',
metavar="name", help=msg)
default_for_input = [
config_dict['io_input_type'],
config_dict['io_input_header']
]
msg = 'Must be one of {}'.format(repr(io_opt_list))
group.add_argument(
'-i', '--input_options', nargs='+', type=str, dest='input_options',
metavar='option', default=default_for_input, choices=io_opt_list,
help=msg)
def _io_out_adder(parser, config_dict, *args):
"""
Add output options to the parser
"""
in_arg_set = set(args)
if 'io_out' in in_arg_set:
group = parser.add_argument_group('Output Options')
# define the valid components
io_opt_list = [
'csv', 'table', 'html', 'header', 'noheader', 'index', 'noindex',
]
# define the current defaults
default_for_output = [
config_dict['io_output_type'],
config_dict['io_output_header'],
config_dict['io_output_index']
]
# show the current defaults in the arg parser
msg = 'Must be one of {}'.format(repr(io_opt_list))
group.add_argument(
'-o', '--output_options', nargs='+',
type=str, dest='output_options', metavar='option',
default=default_for_output, help=msg)
msg = (
'Replace NaNs with this string. '
'A string containing \'nan\' will set na_rep to numpy NaN. '
'Current default is {}'
).format(repr(str(config_dict['io_output_na_rep'])))
group.add_argument(
'--output_na_rep', nargs=1, type=str, dest='io_output_na_rep',
help=msg)
def _decorating_adder(parser, *args):
in_arg_set = set(args)
if 'decorating' in in_arg_set:
# get a list of valid plot styling info
context_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_context'][0][1]
theme_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_theme'][0][1]
palette_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_palette'][0][1]
group = parser.add_argument_group('Plot specific Options')
msg = "Set the x-limits for the plot"
group.add_argument(
'--xlim', nargs=2, type=float, dest='xlim',
metavar=('XMIN', 'XMAX'), help=msg)
msg = "Set the y-limits for the plot"
group.add_argument(
'--ylim', nargs=2, type=float, dest='ylim',
metavar=('YMIN', 'YMAX'), help=msg)
msg = "Draw x axis with log scale"
group.add_argument(
'--xlog', action='store_true', dest='xlog', default=False,
help=msg)
msg = "Draw y axis with log scale"
group.add_argument(
'--ylog', action='store_true', dest='ylog', default=False,
help=msg)
msg = "Set the x-label for the plot"
group.add_argument(
'--xlabel', nargs=1, type=str, dest='xlabel', help=msg)
msg = "Set the y-label for the plot"
group.add_argument(
'--ylabel', nargs=1, type=str, dest='ylabel', help=msg)
msg = "Set the title for the plot"
group.add_argument(
'--title', nargs=1, type=str, dest='title', help=msg)
msg = "Specify legend location"
group.add_argument(
'--legend', nargs=1, type=str, dest='legend',
choices=['1', '2', '3', '4', 'best'], help=msg)
msg = "Specify whether hide the grid or not"
group.add_argument(
'--nogrid', action='store_true', dest='no_grid', default=False,
help=msg)
msg = "Specify plot context. Default = '{}' ".format(context_list[0])
group.add_argument(
'--context', nargs=1, type=str, dest='plot_context',
default=[context_list[0]], choices=context_list, help=msg)
msg = "Specify plot theme. Default = '{}' ".format(theme_list[0])
group.add_argument(
'--theme', nargs=1, type=str, dest='plot_theme',
default=[theme_list[0]], choices=theme_list, help=msg)
msg = "Specify plot palette. Default = '{}' ".format(palette_list[0])
group.add_argument(
'--palette', nargs=1, type=str, dest='plot_palette',
default=[palette_list[0]], choices=palette_list, help=msg)
msg = "Save the figure to this file"
group.add_argument('--savefig', nargs=1, type=str, help=msg)
def _xy_adder(parser, *args):
in_arg_set = set(args)
if 'xy_plotting' in in_arg_set:
msg = 'Column to plot on x-axis'
parser.add_argument(
'-x', nargs=1, type=str, dest='x', metavar='col', help=msg)
msg = 'List of columns to plot on y-axis'
parser.add_argument(
'-y', nargs='+', type=str, dest='y', metavar='col', help=msg)
msg = "Plot style(s) defaults to .-"
parser.add_argument(
'-s', '--style', nargs='+', type=str, dest='style', default=['.-'],
help=msg, metavar='style')
def add_args(parser, *args):
"""Adds argument blocks to the arg parser
:type parser: argparse instance
    :param parser: The argparse instance to use in adding arguments
    Additional arguments are the names of argument blocks to add
"""
config_dict = config_lib.get_config()
_check_for_recognized_args(*args)
_io_in_adder(parser, config_dict, *args)
_io_out_adder(parser, config_dict, *args)
_decorating_adder(parser, *args)
_xy_adder(parser, *args)
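# Hedged usage sketch (added for illustration; the parser setup below is an
# assumption, not part of the pandashells API surface). A command-line tool
# would typically build its own argparse parser and request argument blocks
# by name, where the valid names are exactly those checked in
# _check_for_recognized_args above:
#
#     import argparse
#     from pandashells.lib import arg_lib
#
#     parser = argparse.ArgumentParser(description='plot a csv')
#     arg_lib.add_args(parser, 'io_in', 'xy_plotting', 'decorating')
#     args = parser.parse_args()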
| bsd-2-clause |
nkmk/python-snippets | notebook/pandas_get_dummies.py | 1 | 6174 | import pandas as pd
import numpy as np
df = pd.read_csv('data/src/sample_pandas_normal.csv', index_col=0)
df['sex'] = ['female', np.nan, 'male', 'male', 'female', 'male']
df['rank'] = [2, 1, 1, 0, 2, 0]
print(df)
# age state point sex rank
# name
# Alice 24 NY 64 female 2
# Bob 42 CA 92 NaN 1
# Charlie 18 CA 70 male 1
# Dave 68 TX 70 male 0
# Ellen 24 CA 88 female 2
# Frank 30 NY 57 male 0
print(pd.get_dummies(df['sex']))
# female male
# name
# Alice 1 0
# Bob 0 0
# Charlie 0 1
# Dave 0 1
# Ellen 1 0
# Frank 0 1
print(pd.get_dummies(['male', 1, 1, 2]))
# 1 2 male
# 0 0 0 1
# 1 1 0 0
# 2 1 0 0
# 3 0 1 0
print(pd.get_dummies(np.arange(6)))
# 0 1 2 3 4 5
# 0 1 0 0 0 0 0
# 1 0 1 0 0 0 0
# 2 0 0 1 0 0 0
# 3 0 0 0 1 0 0
# 4 0 0 0 0 1 0
# 5 0 0 0 0 0 1
# print(pd.get_dummies(np.arange(6).reshape((2, 3))))
# Exception: Data must be 1-dimensional
print(pd.get_dummies(df))
# age point rank state_CA state_NY state_TX sex_female sex_male
# name
# Alice 24 64 2 0 1 0 1 0
# Bob 42 92 1 1 0 0 0 0
# Charlie 18 70 1 1 0 0 0 1
# Dave 68 70 0 0 0 1 0 1
# Ellen 24 88 2 1 0 0 1 0
# Frank 30 57 0 0 1 0 0 1
print(pd.get_dummies(df, drop_first=True))
# age point rank state_NY state_TX sex_male
# name
# Alice 24 64 2 1 0 0
# Bob 42 92 1 0 0 0
# Charlie 18 70 1 0 0 1
# Dave 68 70 0 0 1 1
# Ellen 24 88 2 0 0 0
# Frank 30 57 0 1 0 1
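# drop_first=True drops one dummy column per original categorical column,
# which removes the redundant level (the "dummy variable trap") and keeps the
# design matrix full rank for linear models.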
print(pd.get_dummies(df, drop_first=True, dummy_na=True))
# age point rank state_NY state_TX state_nan sex_male sex_nan
# name
# Alice 24 64 2 1 0 0 0 0
# Bob 42 92 1 0 0 0 0 1
# Charlie 18 70 1 0 0 0 1 0
# Dave 68 70 0 0 1 0 1 0
# Ellen 24 88 2 0 0 0 0 0
# Frank 30 57 0 1 0 0 1 0
print(pd.get_dummies(df, drop_first=True, prefix='', prefix_sep=''))
# age point rank NY TX male
# name
# Alice 24 64 2 1 0 0
# Bob 42 92 1 0 0 0
# Charlie 18 70 1 0 0 1
# Dave 68 70 0 0 1 1
# Ellen 24 88 2 0 0 0
# Frank 30 57 0 1 0 1
print(pd.get_dummies(df, drop_first=True, prefix=['ST', 'sex'], prefix_sep='-'))
# age point rank ST-NY ST-TX sex-male
# name
# Alice 24 64 2 1 0 0
# Bob 42 92 1 0 0 0
# Charlie 18 70 1 0 0 1
# Dave 68 70 0 0 1 1
# Ellen 24 88 2 0 0 0
# Frank 30 57 0 1 0 1
print(pd.get_dummies(df, drop_first=True, prefix={'state': 'ST', 'sex': 'sex'}, prefix_sep='-'))
# age point rank ST-NY ST-TX sex-male
# name
# Alice 24 64 2 1 0 0
# Bob 42 92 1 0 0 0
# Charlie 18 70 1 0 0 1
# Dave 68 70 0 0 1 1
# Ellen 24 88 2 0 0 0
# Frank 30 57 0 1 0 1
print(pd.get_dummies(df, drop_first=True, columns=['sex', 'rank']))
# age state point sex_male rank_1 rank_2
# name
# Alice 24 NY 64 0 0 1
# Bob 42 CA 92 0 1 0
# Charlie 18 CA 70 1 1 0
# Dave 68 TX 70 1 0 0
# Ellen 24 CA 88 0 0 1
# Frank 30 NY 57 1 0 0
df['rank'] = df['rank'].astype(object)
print(pd.get_dummies(df, drop_first=True))
# age point state_NY state_TX sex_male rank_1 rank_2
# name
# Alice 24 64 1 0 0 0 1
# Bob 42 92 0 0 0 1 0
# Charlie 18 70 0 0 1 1 0
# Dave 68 70 0 1 1 0 0
# Ellen 24 88 0 0 0 0 1
# Frank 30 57 1 0 1 0 0
print(df['state'].map({'CA': 0, 'NY': 1, 'TX': 2}))
# name
# Alice 1
# Bob 0
# Charlie 0
# Dave 2
# Ellen 0
# Frank 1
# Name: state, dtype: int64
df['state'] = df['state'].map({'CA': 0, 'NY': 1, 'TX': 2})
print(df)
# age state point sex rank
# name
# Alice 24 1 64 female 2
# Bob 42 0 92 NaN 1
# Charlie 18 0 70 male 1
# Dave 68 2 70 male 0
# Ellen 24 0 88 female 2
# Frank 30 1 57 male 0
| mit |
miyosuda/intro-to-dl-android | ImageClassification/jni-build/jni/include/tensorflow/python/client/notebook.py | 26 | 4596 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import sys
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"password", None,
"Password to require. If set, the server will allow public access."
" Only used if notebook config file does not exist.")
flags.DEFINE_string("notebook_dir", "experimental/brain/notebooks",
"root location where to store notebooks")
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
app.run()
| apache-2.0 |
Vimos/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
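# Hedged sketch: adding another model to this benchmark only needs one more
# entry in `configurations` below with the same keys. The values here are
# illustrative only (and would require `from sklearn.linear_model import Lasso`):
#
# {'estimator': Lasso,
#  'tuned_params': {'max_iter': 5000},
#  'changing_param': 'alpha',
#  'changing_param_values': [1.0, 0.1, 0.01, 0.001],
#  'complexity_label': 'non_zero coefficients',
#  'complexity_computer': lambda est: np.count_nonzero(est.coef_),
#  'prediction_performance_computer': mean_squared_error,
#  'prediction_performance_label': 'MSE',
#  'postfit_hook': lambda est: est,
#  'data': regression_data,
#  'n_samples': 30},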
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
runt18/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/scale.py | 1 | 13442 | import textwrap
import numpy as np
from numpy import ma
MaskedArray = ma.MaskedArray
from cbook import dedent
from ticker import NullFormatter, ScalarFormatter, LogFormatterMathtext, Formatter
from ticker import NullLocator, LogLocator, AutoLocator, SymmetricalLogLocator, FixedLocator
from transforms import Transform, IdentityTransform
class ScaleBase(object):
"""
The base class for all scales.
Scales are separable transformations, working on a single dimension.
Any subclasses will want to override:
- :attr:`name`
- :meth:`get_transform`
And optionally:
- :meth:`set_default_locators_and_formatters`
- :meth:`limit_range_for_scale`
"""
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` object
associated with this scale.
"""
raise NotImplementedError
def set_default_locators_and_formatters(self, axis):
"""
Set the :class:`~matplotlib.ticker.Locator` and
:class:`~matplotlib.ticker.Formatter` objects on the given
axis to match this scale.
"""
raise NotImplementedError
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Returns the range *vmin*, *vmax*, possibly limited to the
domain supported by this scale.
*minpos* should be the minimum positive value in the data.
This is used by log scales to determine a minimum value.
"""
return vmin, vmax
class LinearScale(ScaleBase):
"""
The default linear scale.
"""
name = 'linear'
def __init__(self, axis, **kwargs):
pass
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to reasonable defaults for
linear scaling.
"""
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_locator(NullLocator())
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
The transform for linear scaling is just the
:class:`~matplotlib.transforms.IdentityTransform`.
"""
return IdentityTransform()
def _mask_non_positives(a):
"""
Return a Numpy masked array where all non-positive values are
masked. If there are no non-positive values, the original array
is returned.
"""
mask = a <= 0.0
if mask.any():
return ma.MaskedArray(a, mask=mask)
return a
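# Illustrative behaviour of _mask_non_positives (informal, not a doctest):
#     _mask_non_positives(np.array([1.0, 2.0, 3.0]))   -> returned unchanged
#     _mask_non_positives(np.array([-1.0, 0.0, 2.0]))  -> first two entries masked
# Masking is what lets the log transforms below skip non-positive values
# instead of emitting warnings or NaNs.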
class LogScale(ScaleBase):
"""
A standard logarithmic scale. Care is taken so non-positive
values are not plotted.
For computational efficiency (to push as much as possible to Numpy
C code in the common cases), this scale provides different
transforms depending on the base of the logarithm:
- base 10 (:class:`Log10Transform`)
- base 2 (:class:`Log2Transform`)
- base e (:class:`NaturalLogTransform`)
- arbitrary base (:class:`LogTransform`)
"""
name = 'log'
class Log10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
a = _mask_non_positives(a * 10.0)
if isinstance(a, MaskedArray):
return ma.log10(a)
return np.log10(a)
def inverted(self):
return LogScale.InvertedLog10Transform()
class InvertedLog10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
return ma.power(10.0, a) / 10.0
def inverted(self):
return LogScale.Log10Transform()
class Log2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
a = _mask_non_positives(a * 2.0)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(2)
return np.log2(a)
def inverted(self):
return LogScale.InvertedLog2Transform()
class InvertedLog2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
return ma.power(2.0, a) / 2.0
def inverted(self):
return LogScale.Log2Transform()
class NaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
a = _mask_non_positives(a * np.e)
if isinstance(a, MaskedArray):
return ma.log(a)
return np.log(a)
def inverted(self):
return LogScale.InvertedNaturalLogTransform()
class InvertedNaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
return ma.power(np.e, a) / np.e
def inverted(self):
return LogScale.NaturalLogTransform()
class LogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
a = _mask_non_positives(a * self.base)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(self.base)
return np.log(a) / np.log(self.base)
def inverted(self):
return LogScale.InvertedLogTransform(self.base)
class InvertedLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
return ma.power(self.base, a) / self.base
def inverted(self):
return LogScale.LogTransform(self.base)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``
will place 10 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
subs = kwargs.pop('subsy', None)
if base == 10.0:
self._transform = self.Log10Transform()
elif base == 2.0:
self._transform = self.Log2Transform()
elif base == np.e:
self._transform = self.NaturalLogTransform()
else:
self._transform = self.LogTransform(base)
self.base = base
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
log scaling.
"""
axis.set_major_locator(LogLocator(self.base))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(LogLocator(self.base, self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`~matplotlib.transforms.Transform` instance
appropriate for the given logarithm base.
"""
return self._transform
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to positive values.
"""
return (vmin <= 0.0 and minpos or vmin,
vmax <= 0.0 and minpos or vmax)
class SymmetricalLogScale(ScaleBase):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
name = 'symlog'
class SymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._linadjust = (np.log(linthresh) / self._log_base) / linthresh
def transform(self, a):
a = np.asarray(a)
sign = np.sign(a)
masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
log = sign * ma.log(np.abs(masked)) / self._log_base
if masked.mask.any():
return np.asarray(ma.where(masked.mask,
a * self._linadjust,
log))
else:
return np.asarray(log)
def inverted(self):
return SymmetricalLogScale.InvertedSymmetricalLogTransform(self.base, self.linthresh)
class InvertedSymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._log_linthresh = np.log(linthresh) / self._log_base
self._linadjust = linthresh / (np.log(linthresh) / self._log_base)
def transform(self, a):
a = np.asarray(a)
return np.where(a <= self._log_linthresh,
np.where(a >= -self._log_linthresh,
a * self._linadjust,
-(np.power(self.base, -a))),
np.power(self.base, a))
def inverted(self):
return SymmetricalLogScale.SymmetricalLogTransform(self.base)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*linthreshx*/*linthreshy*:
The range (-*x*, *x*) within which the plot is linear (to
avoid having the plot go to infinity around zero).
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``
will place 10 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
linthresh = kwargs.pop('linthreshx', 2.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
linthresh = kwargs.pop('linthreshy', 2.0)
subs = kwargs.pop('subsy', None)
self._transform = self.SymmetricalLogTransform(base, linthresh)
self.base = base
self.linthresh = linthresh
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
symmetrical log scaling.
"""
axis.set_major_locator(SymmetricalLogLocator(self.get_transform()))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(SymmetricalLogLocator(self.get_transform(), self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`SymmetricalLogTransform` instance.
"""
return self._transform
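# Hedged usage sketch: these scale classes are normally reached through
# Axes.set_xscale/set_yscale rather than instantiated directly. The keyword
# names mirror the __init__ docstrings above (newer matplotlib releases
# renamed them); the data values are illustrative only.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     x = np.linspace(-50, 50, 201)
#     ax.plot(x, x ** 3)
#     ax.set_yscale('symlog', basey=10, linthreshy=2.0)  # linear inside (-2, 2)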
_scale_mapping = {
'linear' : LinearScale,
'log' : LogScale,
'symlog' : SymmetricalLogScale
}
def get_scale_names():
names = _scale_mapping.keys()
names.sort()
return names
def scale_factory(scale, axis, **kwargs):
"""
Return a scale class by name.
ACCEPTS: [ %(names)s ]
"""
    if scale is None:
        scale = 'linear'
    scale = scale.lower()
if scale not in _scale_mapping:
raise ValueError("Unknown scale type '{0!s}'".format(scale))
return _scale_mapping[scale](axis, **kwargs)
scale_factory.__doc__ = dedent(scale_factory.__doc__) % \
{'names': " | ".join(get_scale_names())}
def register_scale(scale_class):
"""
Register a new kind of scale.
*scale_class* must be a subclass of :class:`ScaleBase`.
"""
_scale_mapping[scale_class.name] = scale_class
def get_scale_docs():
"""
Helper function for generating docstrings related to scales.
"""
docs = []
for name in get_scale_names():
scale_class = _scale_mapping[name]
docs.append(" '{0!s}'".format(name))
docs.append("")
class_docs = dedent(scale_class.__init__.__doc__)
class_docs = "".join([" {0!s}\n".format(
x) for x in class_docs.split("\n")])
docs.append(class_docs)
docs.append("")
return "\n".join(docs)
| agpl-3.0 |
eclee25/flu-SDI-exploratory-age | scripts/pre_ORgenerator_py/OR_subtype_vaxmatch_v6-4-13.py | 1 | 5199 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: June 4, 2013
###Function:
#### generate a metric that represents the potential interactive effect between the prominent subtype and the vax strain match for the prominent subtype
#### draw a plot of OR (y-axis) vs this interaction metric (x-axis)
#### same plot is represented in two ways -- labels are season number or prominent subtype(s)
###Import data: subtype.csv, odds_c_a1.csv, odds_c_a3_a, odds_c_a3_b
###Command Line: python OR_subtype_vaxmatch_v6-4-13.py
##############################################
### notes ###
# potential interactive effect: vax match and prominent subtypes. % isolates that are H1 * % H1 isolates that matched H1 vax strain = # H1 isolates/# isolates * # H1 matched isolates/# H1 isolates
### packages ###
import matplotlib
import csv
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
## local packages ##
### data structures ###
child1, child3a, child3b, adult1, adult3a, adult3b = [],[],[],[],[],[] # attack rates for children and adults for total, severe, and mild cases
y1, y3a, y3b = [],[],[] # odds ratios for total, severe, and mild cases
seasonnum, match_iso, psubtypelab = [],[],[] # season number, # matched isolates for prominent subtypes/# total isolates, prominent subtype label
### parameters ###
USchild = 20348657 + 20677194 + 22040343 #US child popn
USadult = 21585999 + 21101849 + 19962099 + 20179642 + 20890964 + 22708591 + 22298125 + 19664805 #US adult popn
### functions ###
def importer (csvreadfile, adultlist, childlist, ilicol):
ct=0
for row in csvreadfile:
if row[1] == "A":
adultlist.append(float(row[ilicol])/USadult)
elif row[1] == "C":
childlist.append(float(row[ilicol])/USchild)
else:
ct+=1
def ORgen (ylist, childlist, adultlist):
for i in range(0,len(childlist)):
ylist.append((childlist[i]/(1-childlist[i]))/(adultlist[i]/(1-adultlist[i])))
# print childlist[i], 1-childlist[i], adultlist[i], 1-adultlist[i]
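# Worked example of the odds-ratio formula above (numbers are illustrative,
# not study data): if the child attack rate is 0.04 and the adult attack rate
# is 0.02, then OR = (0.04/0.96) / (0.02/0.98) ~= 2.04, i.e. children have
# roughly twice the odds of an ILI visit in that season.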
def subtype_vaxmatch_import (csvreadfile, season, interact, s_label):
for row in csvreadfile:
H1i, H3i, Bi, TOTi = float(row[4]), float(row[5]), float(row[6]), float(row[7])
H1im, H3im, Bim, TOTim = float(row[8]), float(row[9]), float(row[10]), float(row[11])
season.append(int(row[0])) # season number
s_label.append(row[2])
val = int(row[3])
# subtype value determines how percentage will be calculated
if val == 1: # H1 matched isolates/# isolates
interact.append(H1im/TOTi)
elif val == 2: # H3 matched isolates/# isolates
interact.append(H3im/TOTi)
elif val == 5: # H1+B matched isolates/# isolates
interact.append((H1im+Bim)/TOTi)
elif val == 6: # H3+B matched isolates/# isolates
interact.append((H3im+Bim)/TOTi)
elif val == 7: # H1+H3+B matched isolates/# isolates
interact.append((H1im+H3im+Bim)/TOTi)
#print val, H1im, H3im, Bim, TOTi
### import data ###
d1in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a1.csv','r')
d1=csv.reader(d1in, delimiter=',')
d3ain=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a3_a.csv','r')
d3a=csv.reader(d3ain, delimiter=',')
d3bin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a3_b.csv','r')
d3b=csv.reader(d3bin, delimiter=',')
subtypein=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/subtype3.csv','r')
subtype=csv.reader(subtypein, delimiter=',')
### program ###
importer(d1, adult1, child1, 2)
importer(d3a, adult3a, child3a, 2)
importer(d3b, adult3b, child3b, 2)
ORgen(y1, child1, adult1)
ORgen(y3a, child3a, adult3a)
ORgen(y3b, child3b, adult3b)
subtype_vaxmatch_import(subtype, seasonnum, match_iso, psubtypelab)
print match_iso
# plot OR vs # matched isolates of prominent subtypes that season / # total isolates (labels represent season num)
plt.scatter(match_iso, y1, marker='o', color = 'black', label= "all cases")
plt.scatter(match_iso, y3a, marker='o', color = 'red', label= "severe cases")
plt.scatter(match_iso, y3b, marker='o', color = 'green', label= "milder cases")
for num, perc, OR in zip(seasonnum, match_iso, y1):
plt.annotate(num, xy = (perc, OR), xytext = (10,0), textcoords = 'offset points')
for num, perc, OR in zip(seasonnum, match_iso, y3a):
plt.annotate(num, xy = (perc, OR), xytext = (-10,0), textcoords = 'offset points')
for num, perc, OR in zip(seasonnum, match_iso, y3b):
plt.annotate(num, xy = (perc, OR), xytext = (-10,5), textcoords = 'offset points')
plt.ylabel('Odds ratio of attack rate, child:adult (US popn normalized)')
plt.xlabel('Matched isolates (prominent subtypes only)/ Total isolates')
plt.legend(loc="upper left")
plt.show()
# same plot as above except labels are prominent subtype
plt.scatter(match_iso, y1, marker='o', color = 'black', label= "all cases")
for lab, perc, OR in zip(psubtypelab, match_iso, y1):
plt.annotate(lab, xy = (perc, OR), xytext = (10,0), textcoords = 'offset points')
plt.ylabel('Odds ratio of attack rate, child:adult (US popn normalized)')
plt.xlabel('Matched isolates (prominent subtypes only)/ Total isolates')
plt.legend(loc="upper left")
plt.show()
| mit |
MechCoder/scikit-learn | sklearn/neighbors/tests/test_kde.py | 26 | 5518 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
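# compute_kernel_slow is the brute-force O(n_train * n_test) reference: for
# each query point it sums the chosen kernel over every training point and
# applies kernel_norm(h, n_features, kernel) / n_train, which the tree-backed
# KernelDensity is expected to reproduce within the atol/rtol tolerances.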
def check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol,
X, Y, dens_true)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
LinkHS/incubator-mxnet | example/reinforcement-learning/ddpg/strategies.py | 42 | 2473 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
class BaseStrategy(object):
"""
Base class of exploration strategy.
"""
def get_action(self, obs, policy):
raise NotImplementedError
def reset(self):
pass
class OUStrategy(BaseStrategy):
"""
    Ornstein-Uhlenbeck process: dx_t = theta * (mu - x_t) * dt + sigma * dW_t,
    where W_t denotes the Wiener process.
"""
def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3):
self.mu = mu
self.theta = theta
self.sigma = sigma
self.action_space = env_spec.action_space
self.state = np.ones(self.action_space.flat_dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
def reset(self):
self.state = np.ones(self.action_space.flat_dim) * self.mu
def get_action(self, obs, policy):
# get_action accepts a 2D tensor with one row
obs = obs.reshape((1, -1))
action = policy.get_action(obs)
increment = self.evolve_state()
return np.clip(action + increment,
self.action_space.low,
self.action_space.high)
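# The evolve_state() update above is the Euler discretisation (dt = 1) of the
# SDE in the class docstring:
#     x_{t+1} = x_t + theta * (mu - x_t) + sigma * N(0, 1)
# With the defaults (mu=0, theta=0.15, sigma=0.3) the noise is pulled back
# toward zero while staying temporally correlated, which is why it is a common
# exploration noise for DDPG.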
if __name__ == "__main__":
class Env1(object):
def __init__(self):
self.action_space = Env2()
class Env2(object):
def __init__(self):
self.flat_dim = 2
env_spec = Env1()
test = OUStrategy(env_spec)
states = []
for i in range(1000):
states.append(test.evolve_state()[0])
import matplotlib.pyplot as plt
plt.plot(states)
plt.show()
| apache-2.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/metrics/tests/test_regression.py | 49 | 8058 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises, assert_raises_regex
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_squared_log_error(y_true, y_pred),
mean_squared_error(np.log(1 + y_true),
np.log(1 + y_pred)))
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = mean_squared_log_error(y_true, y_pred)
assert_almost_equal(error, 0.200, decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_squared_log_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
"used when targets contain negative values.",
mean_squared_log_error, [-1.], [-1.])
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test__check_reg_targets_exception():
invalid_multioutput = 'this_value_is_not_valid'
expected_message = ("Allowed 'multioutput' string values are.+"
"You provided multioutput={!r}".format(
invalid_multioutput))
assert_raises_regex(ValueError, expected_message,
_check_reg_targets,
[1, 2, 3],
[[1], [2], [3]],
invalid_multioutput)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
multioutput='raw_values')
assert_array_almost_equal(msle, msle2, decimal=2)
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
multioutput=[0.3, 0.7])
assert_almost_equal(msle, msle2, decimal=2)
| mit |
ryanjmccall/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/scale.py | 69 | 13414 | import textwrap
import numpy as np
from numpy import ma
MaskedArray = ma.MaskedArray
from cbook import dedent
from ticker import NullFormatter, ScalarFormatter, LogFormatterMathtext, Formatter
from ticker import NullLocator, LogLocator, AutoLocator, SymmetricalLogLocator, FixedLocator
from transforms import Transform, IdentityTransform
class ScaleBase(object):
"""
The base class for all scales.
Scales are separable transformations, working on a single dimension.
Any subclasses will want to override:
- :attr:`name`
- :meth:`get_transform`
And optionally:
- :meth:`set_default_locators_and_formatters`
- :meth:`limit_range_for_scale`
"""
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` object
associated with this scale.
"""
raise NotImplementedError
def set_default_locators_and_formatters(self, axis):
"""
Set the :class:`~matplotlib.ticker.Locator` and
:class:`~matplotlib.ticker.Formatter` objects on the given
axis to match this scale.
"""
raise NotImplementedError
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Returns the range *vmin*, *vmax*, possibly limited to the
domain supported by this scale.
*minpos* should be the minimum positive value in the data.
This is used by log scales to determine a minimum value.
"""
return vmin, vmax
class LinearScale(ScaleBase):
"""
The default linear scale.
"""
name = 'linear'
def __init__(self, axis, **kwargs):
pass
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to reasonable defaults for
linear scaling.
"""
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_locator(NullLocator())
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
The transform for linear scaling is just the
:class:`~matplotlib.transforms.IdentityTransform`.
"""
return IdentityTransform()
def _mask_non_positives(a):
"""
Return a Numpy masked array where all non-positive values are
masked. If there are no non-positive values, the original array
is returned.
"""
mask = a <= 0.0
if mask.any():
return ma.MaskedArray(a, mask=mask)
return a
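# Behaviour sketch (illustrative only): _mask_non_positives(np.array([1., -2., 3.]))
# returns a MaskedArray with the -2. entry masked, while an all-positive input
# such as np.array([1., 2.]) is returned unchanged.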
class LogScale(ScaleBase):
"""
A standard logarithmic scale. Care is taken so non-positive
values are not plotted.
For computational efficiency (to push as much as possible to Numpy
C code in the common cases), this scale provides different
transforms depending on the base of the logarithm:
- base 10 (:class:`Log10Transform`)
- base 2 (:class:`Log2Transform`)
- base e (:class:`NaturalLogTransform`)
- arbitrary base (:class:`LogTransform`)
"""
name = 'log'
class Log10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
a = _mask_non_positives(a * 10.0)
if isinstance(a, MaskedArray):
return ma.log10(a)
return np.log10(a)
def inverted(self):
return LogScale.InvertedLog10Transform()
class InvertedLog10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
return ma.power(10.0, a) / 10.0
def inverted(self):
return LogScale.Log10Transform()
class Log2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
a = _mask_non_positives(a * 2.0)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(2)
return np.log2(a)
def inverted(self):
return LogScale.InvertedLog2Transform()
class InvertedLog2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
return ma.power(2.0, a) / 2.0
def inverted(self):
return LogScale.Log2Transform()
class NaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
a = _mask_non_positives(a * np.e)
if isinstance(a, MaskedArray):
return ma.log(a)
return np.log(a)
def inverted(self):
return LogScale.InvertedNaturalLogTransform()
class InvertedNaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
return ma.power(np.e, a) / np.e
def inverted(self):
return LogScale.NaturalLogTransform()
class LogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
a = _mask_non_positives(a * self.base)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(self.base)
return np.log(a) / np.log(self.base)
def inverted(self):
return LogScale.InvertedLogTransform(self.base)
class InvertedLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
return ma.power(self.base, a) / self.base
def inverted(self):
return LogScale.LogTransform(self.base)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
           scale: ``[2, 3, 4, 5, 6, 7, 8, 9]``
           will place 8 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
subs = kwargs.pop('subsy', None)
if base == 10.0:
self._transform = self.Log10Transform()
elif base == 2.0:
self._transform = self.Log2Transform()
elif base == np.e:
self._transform = self.NaturalLogTransform()
else:
self._transform = self.LogTransform(base)
self.base = base
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
log scaling.
"""
axis.set_major_locator(LogLocator(self.base))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(LogLocator(self.base, self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`~matplotlib.transforms.Transform` instance
appropriate for the given logarithm base.
"""
return self._transform
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to positive values.
"""
return (vmin <= 0.0 and minpos or vmin,
vmax <= 0.0 and minpos or vmax)
class SymmetricalLogScale(ScaleBase):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
name = 'symlog'
class SymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._linadjust = (np.log(linthresh) / self._log_base) / linthresh
def transform(self, a):
a = np.asarray(a)
sign = np.sign(a)
masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
log = sign * ma.log(np.abs(masked)) / self._log_base
if masked.mask.any():
return np.asarray(ma.where(masked.mask,
a * self._linadjust,
log))
else:
return np.asarray(log)
def inverted(self):
return SymmetricalLogScale.InvertedSymmetricalLogTransform(self.base, self.linthresh)
class InvertedSymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._log_linthresh = np.log(linthresh) / self._log_base
self._linadjust = linthresh / (np.log(linthresh) / self._log_base)
def transform(self, a):
a = np.asarray(a)
return np.where(a <= self._log_linthresh,
np.where(a >= -self._log_linthresh,
a * self._linadjust,
-(np.power(self.base, -a))),
np.power(self.base, a))
def inverted(self):
            return SymmetricalLogScale.SymmetricalLogTransform(self.base, self.linthresh)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*linthreshx*/*linthreshy*:
The range (-*x*, *x*) within which the plot is linear (to
avoid having the plot go to infinity around zero).
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
           scale: ``[2, 3, 4, 5, 6, 7, 8, 9]``
           will place 8 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
linthresh = kwargs.pop('linthreshx', 2.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
linthresh = kwargs.pop('linthreshy', 2.0)
subs = kwargs.pop('subsy', None)
self._transform = self.SymmetricalLogTransform(base, linthresh)
self.base = base
self.linthresh = linthresh
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
symmetrical log scaling.
"""
axis.set_major_locator(SymmetricalLogLocator(self.get_transform()))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(SymmetricalLogLocator(self.get_transform(), self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`SymmetricalLogTransform` instance.
"""
return self._transform
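# Usage sketch (illustrative; assumes a Matplotlib Axes instance named ``ax``,
# keyword names follow the __init__ signatures above):
#   ax.set_xscale('symlog', basex=10.0, linthreshx=0.01)
#   ax.set_yscale('log', basey=2.0, subsy=[2, 3])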
_scale_mapping = {
'linear' : LinearScale,
'log' : LogScale,
'symlog' : SymmetricalLogScale
}
def get_scale_names():
names = _scale_mapping.keys()
names.sort()
return names
def scale_factory(scale, axis, **kwargs):
"""
Return a scale class by name.
ACCEPTS: [ %(names)s ]
"""
    if scale is None:
        scale = 'linear'
    else:
        scale = scale.lower()
if scale not in _scale_mapping:
raise ValueError("Unknown scale type '%s'" % scale)
return _scale_mapping[scale](axis, **kwargs)
scale_factory.__doc__ = dedent(scale_factory.__doc__) % \
{'names': " | ".join(get_scale_names())}
def register_scale(scale_class):
"""
Register a new kind of scale.
*scale_class* must be a subclass of :class:`ScaleBase`.
"""
_scale_mapping[scale_class.name] = scale_class
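# Registration sketch (illustrative; ``SqrtScale`` is a hypothetical example):
#   class SqrtScale(ScaleBase):
#       name = 'sqrt'
#       def get_transform(self):
#           return IdentityTransform()  # a real scale would return its own Transform
#       def set_default_locators_and_formatters(self, axis):
#           axis.set_major_locator(AutoLocator())
#           axis.set_major_formatter(ScalarFormatter())
#   register_scale(SqrtScale)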
def get_scale_docs():
"""
Helper function for generating docstrings related to scales.
"""
docs = []
for name in get_scale_names():
scale_class = _scale_mapping[name]
docs.append(" '%s'" % name)
docs.append("")
class_docs = dedent(scale_class.__init__.__doc__)
class_docs = "".join([" %s\n" %
x for x in class_docs.split("\n")])
docs.append(class_docs)
docs.append("")
return "\n".join(docs)
| gpl-3.0 |
CallaJun/hackprince | indico/matplotlib/sphinxext/ipython_console_highlighting.py | 11 | 4601 | """reST directive for syntax-highlighting ipython interactive sessions.
XXX - See what improvements can be made based on the new (as of Sept 2009)
'pycon' lexer for the python console. At the very least it will give better
highlighted tracebacks.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
#-----------------------------------------------------------------------------
# Needed modules
# Standard library
import re
# Third party
from pygments.lexer import Lexer, do_insertions
from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
PythonTracebackLexer)
from pygments.token import Comment, Generic
from sphinx import highlighting
import matplotlib
matplotlib.cbook.warn_deprecated("1.4", """
The Sphinx extension ipython_console_highlighting has moved from
matplotlib to IPython, and its use in matplotlib is deprecated.
Change your import from 'matplotlib.sphinxext.ipython_console_highlighting' to
'IPython.sphinxext.ipython_console_highlighting'.""")
#-----------------------------------------------------------------------------
# Global constants
line_re = re.compile('.*?\n')
#-----------------------------------------------------------------------------
# Code begins - classes and functions
class IPythonConsoleLexer(Lexer):
"""
For IPython console output or doctests, such as:
.. sourcecode:: ipython
In [1]: a = 'foo'
In [2]: a
Out[2]: 'foo'
In [3]: print a
foo
In [4]: 1 / 0
Notes:
- Tracebacks are not currently supported.
- It assumes the default IPython prompts, not customized ones.
"""
name = 'IPython console session'
aliases = ['ipython']
mimetypes = ['text/x-ipython-console']
input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)")
output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)")
continue_prompt = re.compile(" \.\.\.+:")
tb_start = re.compile("\-+")
def get_tokens_unprocessed(self, text):
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
input_prompt = self.input_prompt.match(line)
continue_prompt = self.continue_prompt.match(line.rstrip())
output_prompt = self.output_prompt.match(line)
if line.startswith("#"):
insertions.append((len(curcode),
[(0, Comment, line)]))
elif input_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, input_prompt.group())]))
curcode += line[input_prompt.end():]
elif continue_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, continue_prompt.group())]))
curcode += line[continue_prompt.end():]
elif output_prompt is not None:
# Use the 'error' token for output. We should probably make
                # our own token, but error is typically in a bright color like
# red, so it works fine for our output prompts.
insertions.append((len(curcode),
[(0, Generic.Error, output_prompt.group())]))
curcode += line[output_prompt.end():]
else:
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
def setup(app):
"""Setup as a sphinx extension."""
# This is only a lexer, so adding it below to pygments appears sufficient.
# But if somebody knows that the right API usage should be to do that via
# sphinx, by all means fix it here. At least having this setup.py
# suppresses the sphinx warning we'd get without it.
pass
#-----------------------------------------------------------------------------
# Register the extension as a valid pygments lexer
highlighting.lexers['ipython'] = IPythonConsoleLexer()
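# Standalone usage sketch (illustrative; the session text is made up):
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   session = "In [1]: 1 + 1\nOut[1]: 2\n"
#   html = highlight(session, IPythonConsoleLexer(), HtmlFormatter())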
| lgpl-3.0 |
pprett/scikit-learn | sklearn/linear_model/bayes.py | 7 | 19494 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
        X : numpy array of shape [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_samples, n_features = X.shape
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ +
lambda_ / alpha_)[:, np.newaxis])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
# Preserve the alpha and lambda values that were used to
# calculate the final coefficients
self.alpha_ = alpha_
self.lambda_ = lambda_
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse_ -
(lambda_ * np.sum(coef_ ** 2)) -
logdet_sigma_ -
n_samples * log(2 * np.pi))
self.scores_.append(s)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
sigma_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = (1. / alpha_) * sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
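# Usage sketch (illustrative, synthetic data; not taken from the docstring):
#   rng = np.random.RandomState(0)
#   X = rng.randn(50, 3)
#   y = np.dot(X, [1., 0., -2.]) + 0.1 * rng.randn(50)
#   reg = BayesianRidge(compute_score=True).fit(X, y)
#   y_mean, y_std = reg.predict(X, return_std=True)  # per-sample predictive std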
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
ARD is a little different than the slide: only dimensions/features for
which self.lambda_ < self.threshold_lambda are kept and the rest are
discarded.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
# Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
X = X[:, self.lambda_ < self.threshold_lambda]
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
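# Post-fit sketch (illustrative): ``lambda_`` holds one precision per feature,
# and features whose precision exceeded ``threshold_lambda`` were pruned, so
# their entries in ``coef_`` are exactly zero (X, y stand for training data):
#   ard = ARDRegression().fit(X, y)
#   pruned = ard.lambda_ >= ard.threshold_lambda
#   # ard.coef_[pruned] is expected to be all zeros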
| bsd-3-clause |
amolkahat/pandas | asv_bench/benchmarks/io/json.py | 5 | 4937 | import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, date_range, timedelta_range, concat, read_json
from ..pandas_vb_common import BaseIO
class ReadJSON(BaseIO):
fname = "__test__.json"
params = (['split', 'index', 'records'], ['int', 'datetime'])
param_names = ['orient', 'index']
def setup(self, orient, index):
N = 100000
indexes = {'int': np.arange(N),
'datetime': date_range('20000101', periods=N, freq='H')}
df = DataFrame(np.random.randn(N, 5),
columns=['float_{}'.format(i) for i in range(5)],
index=indexes[index])
df.to_json(self.fname, orient=orient)
def time_read_json(self, orient, index):
read_json(self.fname, orient=orient)
class ReadJSONLines(BaseIO):
fname = "__test_lines__.json"
params = ['int', 'datetime']
param_names = ['index']
def setup(self, index):
N = 100000
indexes = {'int': np.arange(N),
'datetime': date_range('20000101', periods=N, freq='H')}
df = DataFrame(np.random.randn(N, 5),
columns=['float_{}'.format(i) for i in range(5)],
index=indexes[index])
df.to_json(self.fname, orient='records', lines=True)
def time_read_json_lines(self, index):
read_json(self.fname, orient='records', lines=True)
def time_read_json_lines_concat(self, index):
concat(read_json(self.fname, orient='records', lines=True,
chunksize=25000))
def peakmem_read_json_lines(self, index):
read_json(self.fname, orient='records', lines=True)
def peakmem_read_json_lines_concat(self, index):
concat(read_json(self.fname, orient='records', lines=True,
chunksize=25000))
class ToJSON(BaseIO):
fname = "__test__.json"
params = ['split', 'columns', 'index']
param_names = ['orient']
    def setup(self, orient):
N = 10**5
ncols = 5
index = date_range('20000101', periods=N, freq='H')
timedeltas = timedelta_range(start=1, periods=N, freq='s')
datetimes = date_range(start=1, periods=N, freq='s')
ints = np.random.randint(100000000, size=N)
floats = np.random.randn(N)
strings = tm.makeStringIndex(N)
self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N))
self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index)
self.df_td_int_ts = DataFrame({'td_1': timedeltas,
'td_2': timedeltas,
'int_1': ints,
'int_2': ints,
'ts_1': datetimes,
'ts_2': datetimes},
index=index)
self.df_int_floats = DataFrame({'int_1': ints,
'int_2': ints,
'int_3': ints,
'float_1': floats,
'float_2': floats,
'float_3': floats},
index=index)
self.df_int_float_str = DataFrame({'int_1': ints,
'int_2': ints,
'float_1': floats,
'float_2': floats,
'str_1': strings,
'str_2': strings},
index=index)
def time_floats_with_int_index(self, orient):
self.df.to_json(self.fname, orient=orient)
def time_floats_with_dt_index(self, orient):
self.df_date_idx.to_json(self.fname, orient=orient)
def time_delta_int_tstamp(self, orient):
self.df_td_int_ts.to_json(self.fname, orient=orient)
def time_float_int(self, orient):
self.df_int_floats.to_json(self.fname, orient=orient)
def time_float_int_str(self, orient):
self.df_int_float_str.to_json(self.fname, orient=orient)
    def time_floats_with_int_index_lines(self, orient):
self.df.to_json(self.fname, orient='records', lines=True)
def time_floats_with_dt_index_lines(self, orient):
self.df_date_idx.to_json(self.fname, orient='records', lines=True)
def time_delta_int_tstamp_lines(self, orient):
self.df_td_int_ts.to_json(self.fname, orient='records', lines=True)
def time_float_int_lines(self, orient):
self.df_int_floats.to_json(self.fname, orient='records', lines=True)
def time_float_int_str_lines(self, orient):
self.df_int_float_str.to_json(self.fname, orient='records', lines=True)
from ..pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
CallaJun/hackprince | indico/matplotlib/tri/triplot.py | 21 | 3124 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
def triplot(ax, *args, **kwargs):
"""
    Draw an unstructured triangular grid as lines and/or markers.
The triangulation to plot can be specified in one of two ways;
either::
triplot(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
triplot(x, y, ...)
triplot(x, y, triangles, ...)
triplot(x, y, triangles=triangles, ...)
triplot(x, y, mask=mask, ...)
triplot(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The remaining args and kwargs are the same as for
:meth:`~matplotlib.axes.Axes.plot`.
Return a list of 2 :class:`~matplotlib.lines.Line2D` containing
respectively:
- the lines plotted for triangles edges
- the markers plotted for triangles nodes
**Example:**
.. plot:: mpl_examples/pylab_examples/triplot_demo.py
"""
import matplotlib.axes
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
x, y, edges = (tri.x, tri.y, tri.edges)
# Decode plot format string, e.g., 'ro-'
fmt = ""
if len(args) > 0:
fmt = args[0]
linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)
# Insert plot format string into a copy of kwargs (kwargs values prevail).
kw = kwargs.copy()
for key, val in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if val is not None:
kw[key] = kwargs.get(key, val)
# Draw lines without markers.
# Note 1: If we drew markers here, most markers would be drawn more than
# once as they belong to several edges.
# Note 2: We insert nan values in the flattened edges arrays rather than
# plotting directly (triang.x[edges].T, triang.y[edges].T)
# as it considerably speeds-up code execution.
linestyle = kw['linestyle']
kw_lines = kw.copy()
kw_lines['marker'] = 'None' # No marker to draw.
kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used.
if (linestyle is not None) and (linestyle not in ['None', '', ' ']):
tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
**kw_lines)
else:
tri_lines = ax.plot([], [], **kw_lines)
# Draw markers separately.
marker = kw['marker']
kw_markers = kw.copy()
kw_markers['linestyle'] = 'None' # No line to draw.
if (marker is not None) and (marker not in ['None', '', ' ']):
tri_markers = ax.plot(x, y, **kw_markers)
else:
tri_markers = ax.plot([], [], **kw_markers)
return tri_lines + tri_markers
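# Usage sketch (illustrative; normally called via pyplot with made-up points):
#   import matplotlib.pyplot as plt
#   x = np.asarray([0., 1., 0., 1.])
#   y = np.asarray([0., 0., 1., 1.])
#   triangles = [[0, 1, 2], [1, 3, 2]]
#   plt.triplot(x, y, triangles, 'go-')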
| lgpl-3.0 |
sao-eht/lmtscripts | 2017/loclib.py | 1 | 12222 | # 1mm localization and total power in dreampy
# 2015, 2016 LLB
import numpy
import matplotlib
import shutil
# matplotlib.use('agg')
from matplotlib import pylab, mlab, pyplot
import os
np = numpy
plt = pyplot
# plt.ion()
from argparse import Namespace
from glob import glob
import scipy.io
import scipy.special
from scipy.signal import butter, lfilter, freqz
from scipy.interpolate import interp1d
from scipy.ndimage.filters import minimum_filter1d
from scipy.interpolate import UnivariateSpline
from matplotlib.mlab import griddata, psd
from datetime import datetime, timedelta
from scipy.optimize import fmin
def asec2rad(asec):
return asec * 2*np.pi / 3600. / 360.
def rad2asec(rad):
return rad * 3600. * 360. / (2*np.pi)
# fs = 50.
# nyq = fs/2.
# Namespace.keys = lambda(self): self.__dict__.keys()
# extract 1mm total power data and fix some timing jitter issues
def extract(nc):
t0 = nc.hdu.data.Time[0]
t = nc.hdu.data.Time - t0
a = nc.hdu.data.APower
b = nc.hdu.data.BPower
x = nc.hdu.data.XPos
y = nc.hdu.data.YPos
i = ~nc.hdu.data.BufPos.astype(np.bool)
iobs = nc.hdu.header.ObsNum[0]
if iobs >= 39150: # move to 50 Hz sampling to avoid ADC time glitches
fs = 50.
tnew = nc.hdu.data.Vlbi1mmTpmTime - nc.hdu.data.Vlbi1mmTpmTime[0]
idx = tnew <= t[-1]
a = a[idx]
b = b[idx]
tnew = tnew[idx]
elif iobs >= 38983: # kamal includes gap times
tnew = np.linspace(0, t[-1], len(t))
fs = 1./(t[1]-t[0])
if 'Time' in nc.hdu.data['Vlbi1mmTpm']: # use the ADC time if available >= 39118
adctime = nc.hdu.data.Vlbi1mmTpmTime - nc.hdu.data.Vlbi1mmTpmTime[0]
tnew = np.linspace(0, adctime[-1], len(adctime))
tnew = tnew[(tnew <= t[-1])]
a = interp1d(adctime, a)(tnew)
b = interp1d(adctime, b)(tnew)
elif iobs >= 38915: # 83.3 Hz becomes available but has gaps
fs = 1./0.012
tnew = np.arange(0, t[-1] + 1e-6, 1./fs)
        a = interp1d(t, a)(tnew) # t is not a great variable to use, but all we have
        b = interp1d(t, b)(tnew) # t is not a great variable to use, but all we have
else: # we are in 10 Hz data
fs = 10.
tnew = np.arange(0, t[-1] + 1e-6, .10)
a = interp1d(t, a)(tnew)
b = interp1d(t, b)(tnew)
x = interp1d(t, x)(tnew)
y = interp1d(t, y)(tnew)
i = interp1d(t, i)(tnew).astype(bool)
t = tnew
iobs = nc.hdu.header.ObsNum[0]
source = nc.hdu.header.SourceName
return Namespace(t0=t0, t=t, a=a, b=b, x=x, y=y, i=i, iobs=iobs, source=source, fs=fs)
# basic get scan, then extract data from it
def getscan(iobs, do_extract=True):
from dreampy.onemm.netcdf import OnemmNetCDFFile
filename = glob('/data_lmt/vlbi1mm/vlbi1mm_*%06d*.nc' % iobs)[-1]
nc = OnemmNetCDFFile(filename)
t = nc.hdu.data.Time
# remove large time glitches
tmid = np.median(t)
ibad = np.abs(t-tmid) > 3600
for i in np.nonzero(ibad)[0]:
nc.hdu.data.Time[i] = (t[i-1] + t[i+1]) / 2.
if do_extract:
return extract(nc)
else:
return nc
# raw open (no extract) get original structures
def rawopen(iobs):
from scipy.io import netcdf
filename = glob('/data_lmt/vlbi1mm/vlbi1mm_*%06d*.nc' % iobs)[-1]
nc = netcdf.netcdf_file(filename)
# keep = dict((name.split('.')[-1], val.data) for (name, val) in nc.variables.items()
# if name[:4] == 'Data')
keep = Namespace()
keep.BufPos = nc.variables['Data.Dcs.BufPos'].data
keep.Time = nc.variables['Data.Sky.Time'].data
keep.XPos = nc.variables['Data.Sky.XPos'].data
keep.YPos = nc.variables['Data.Sky.YPos'].data
keep.APower = nc.variables['Data.Vlbi1mmTpm.APower'].data
keep.BPower = nc.variables['Data.Vlbi1mmTpm.BPower'].data
if 'Data.Vlbi1mmTpm.Time' in nc.variables:
keep.ADCTime = nc.variables['Data.Vlbi1mmTpm.Time'].data
return keep
# export to standard numpy
def exportscan(iobs):
z = getscan(iobs)
np.savez('scan_%d' % iobs, **z.__dict__)
# export to standard matlab
def exportmat(iobs):
z = getscan(iobs)
scipy.io.savemat('scan_%d.mat' % iobs, z.__dict__)
# linear detrend, use only edges
def detrend(x, ntaper=100):
x0 = np.mean(x[:ntaper])
x1 = np.mean(x[-ntaper:])
m = (x1 - x0) / len(x)
x2 = x - (x0 + m*np.arange(len(x)))
w = np.hanning(2 * ntaper)
x2[:ntaper] *= w[:ntaper]
x2[-ntaper:] *= w[-ntaper:]
return x2
# patch together many scans and try to align in time (to the sample -- to keep X and Y)
def mfilt(scans):
aps = []
bps = []
xs = []
ys = []
ts = []
ss = []
fss = []
ntaper = 100
for i in sorted(scans):
scan = getscan(i)
aps.append(detrend(scan.a, ntaper=ntaper))
bps.append(detrend(scan.b, ntaper=ntaper))
ts.append(scan.t + scan.t0)
xs.append(scan.x)
ys.append(scan.y)
ss.append(scan.source)
fss.append(scan.fs)
s = ss[0]
fs = fss[0]
t0 = ts[0][0]
t1 = ts[-1][-1]
tnew = np.arange(t0, t1+1./fs, 1./fs)
idx = np.zeros(len(tnew), dtype=np.bool)
x = np.zeros(len(tnew))
y = np.zeros(len(tnew))
a = np.zeros(len(tnew))
b = np.zeros(len(tnew))
for i in range(len(ts)):
        istart = int(np.round((ts[i][0] - t0) * fs))
idx[istart:istart+len(ts[i])] = True
x[istart:istart+len(xs[i])] = xs[i][:len(x)-istart]
y[istart:istart+len(ys[i])] = ys[i][:len(y)-istart]
a[istart:istart+len(aps[i])] = aps[i][:len(a)-istart]
b[istart:istart+len(bps[i])] = bps[i][:len(b)-istart]
x[~idx] = np.inf
y[~idx] = np.inf
fillfrac = float(np.sum(idx)-ntaper*len(scans)) / len(tnew)
return Namespace(t=tnew, a=a, b=b, x=x, y=y, idx=idx, source=s, fs=fs, fillfrac=fillfrac)
def model(x, y, x0=0, y0=0, fwhm=11.):
fwhm = asec2rad(fwhm)
sigma = fwhm / 2.335
# predicted counts
m = np.exp(-((x-x0)**2 + (y-y0)**2) / (2*sigma**2))
return m
def fitmodel(z, win=50., res=2., fwhm=11., channel='b'):
Fs = z.fs
tp = z.__dict__[channel]
    # 1024 is a balance between freq resolution and averaging, good for 50 Hz
(p, f) = psd(tp, NFFT=1024, pad_to=4096, Fs=Fs) # unit variance -> PSD = 1 / Hz
if 'fillfrac' in z:
        p = p / z.fillfrac # account for zeros in stitched timeseries (otherwise 1)
N = len(z.t) # original sequence length
pad = 2**int(np.ceil(np.log2(N))) # pad length for efficient FFTs
fac = np.zeros(pad)
mpad = np.zeros(pad)
bpad = np.zeros(pad)
ipad = np.zeros(pad).astype(bool)
bpad[:N] = tp
B = np.fft.rfft(bpad).conj() # N factor goes into fft, ifft = 1/N * ..
fm = np.abs(np.fft.fftfreq(pad, d=1./Fs)[:1+pad/2])
fac = 1. / interp1d(f, p)(fm) / (Fs/2.) # 1/PSD for matched filter (double whiten), Fs/2 accounts for PSD normalization
fac[fm < 0.1] = 0. # turn off low freqs below 0.1 Hz - just a guess
x = asec2rad(np.arange(-win, win+res, res))
y = asec2rad(np.arange(-win, win+res, res))
(xx, yy) = np.meshgrid(x, y) # search grid
xr = xx.ravel()
yr = yy.ravel()
snrs = [] # signal-to-noise ratios
norms = [] # sqrt of whitened matched filter signal power
for (xtest, ytest) in zip(xr, yr):
mpad[:N] = model(z.x, z.y, xtest, ytest, fwhm=fwhm) # model signal
M = np.fft.rfft(mpad)
# take the real part of sum = 0.5 * ifft[0]
norm = np.sqrt(np.sum(np.abs(M)**2 * fac))
norms.append(norm)
snrs.append(np.sum((M * B * fac).real) / norm)
snr = np.array(snrs)
snr[snr < 0] = 0.
imax = np.argmax(snr) # maximum snr location
(xmax, ymax) = (rad2asec(xr[imax]), rad2asec(yr[imax]))
snr = snr.reshape(xx.shape)
isnr = np.argsort(snr.ravel())[::-1] # reverse sort high to low
prob = np.exp((snr.ravel()/np.sqrt(pad/2.))**2/2.)
pcum = np.zeros_like(prob)
pcum[isnr] = np.cumsum(prob[isnr])
pcum = pcum.reshape(xx.shape) / np.sum(prob)
xxa = xx * rad2asec(1.)
yya = yy * rad2asec(1.)
plt.clf()
h1 = plt.contourf(xxa, yya, pcum, scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), cmap=plt.cm.get_cmap("Blues"))
plt.gca().set_axis_bgcolor('black')
# dw = asec2rad(res)
# plt.imshow(snr**2, extent=map(rad2asec, (x[0]-dw/2., x[-1]+dw/2., y[0]-dw/2., y[-1]+dw/2.)), interpolation='nearest', origin='lower')
plt.ylim(-win, win)
plt.xlim(-win, win)
plt.plot(xmax, ymax, 'y+', ms=11, mew=2)
plt.text(-win, win, '[%.1f, %.1f]' % (xmax, ymax), va='top', ha='left', color='yellow')
plt.text(win, win, '[%.2f mV]' % (1e3 * snrs[imax] / norms[imax]), va='top', ha='right', color='yellow')
print snrs[imax], norms[imax], pad
return Namespace(xx=xx, yy=yy, snr=snr/np.sqrt(pad/2.), v=snr/np.array(norms).reshape(xx.shape))
# (0, 6, 14, 14, 0)
def fitsearch(z, x0=0, y0=0, s10=20., s20=20., th0=0, channel='b'):
Fs = z.fs
tp = z.__dict__[channel]
# 512 is balance between freq resolution and averaging, good for 50 Hz
(p, f) = psd(tp, NFFT=512, pad_to=4096, Fs=Fs) # unit variance -> PSD = 1.
    p = p / z.fillfrac # account for zeros in stitched timeseries
N = len(z.t) # original sequence length
pad = 2**int(np.ceil(np.log2(N))) # pad length for efficient FFTs
fac = np.zeros(pad)
mpad = np.zeros(pad)
bpad = np.zeros(pad)
bpad[:N] = tp
B = np.fft.rfft(bpad).conj() # N factor goes into fft, ifft = 1/N * ..
fm = np.abs(np.fft.fftfreq(pad, d=1./Fs)[:1+pad/2])
fac = 1. / interp1d(f, p)(fm) # 1/PSD for matched filter (double whiten)
fac[fm < 0.1] = 0. # turn off low freqs below 0.1 Hz - just a guess
def snr(args):
(xtest, ytest, s1test, s2test, thtest) = args
mpad[:N] = ezmodel(z.x, z.y, xtest, ytest, s1test, s2test, thtest) # model signal
M = np.fft.rfft(mpad)
norm = np.sqrt(np.sum(np.abs(M)**2 * fac))
snr = np.sum((M * B * fac).real) / norm
return -snr
result = fmin(snr, (asec2rad(x0), asec2rad(y0), asec2rad(s10)/2.355, asec2rad(s20)/2.355, th0*np.pi/180.))
print "x: %.1f" % rad2asec(result[0])
print "y: %.1f" % rad2asec(result[1])
print "s1: %.2f" % rad2asec(result[2]*2.355)
print "s2: %.2f" % rad2asec(result[3]*2.355)
print "th: %.2f" % (result[4] * 180./np.pi)
def fitgrid(z, channel='b'):
Fs = z.fs
tp = z.__dict__[channel]
# 512 is balance between freq resolution and averaging, good for 50 Hz
(p, f) = psd(tp, NFFT=512, pad_to=4096, Fs=Fs) # unit variance -> PSD = 1.
p = p / z.fillfrac # account for zeros in stiched timeseries
N = len(z.t) # original sequence length
pad = 2**int(np.ceil(np.log2(N))) # pad length for efficient FFTs
fac = np.zeros(pad)
mpad = np.zeros(pad)
bpad = np.zeros(pad)
bpad[:N] = tp
B = np.fft.rfft(bpad).conj() # N factor goes into fft, ifft = 1/N * ..
fm = np.abs(np.fft.fftfreq(pad, d=1./Fs)[:1+pad/2])
fac = 1. / interp1d(f, p)(fm) # 1/PSD for matched filter (double whiten)
fac[fm < 0.1] = 0. # turn off low freqs below 0.1 Hz - just a guess
def makesnr(*args):
(xtest, ytest, s1test, s2test, thtest) = args
mpad[:N] = ezmodel(z.x, z.y, xtest, ytest, s1test, s2test, thtest) # model signal
# mpad[:N] = model(z.x, z.y, xtest, ytest, fwhm=rad2asec(s1test)*2.355)
M = np.fft.rfft(mpad)
norm = np.sqrt(np.sum(np.abs(M)**2 * fac))
snr = np.sum((M * B * fac).real) / norm
return snr
(xx, yy, ss1, ss2, tt) = np.mgrid[-2:2, 12:16, 10:30, 10:20, 20:90:15]
snrs = []
pars = zip(xx.ravel(), yy.ravel(), ss1.ravel(), ss2.ravel(), tt.ravel())
for (x, y, s1, s2, th) in pars:
snrs.append(makesnr(asec2rad(x)/2, asec2rad(y)/2, asec2rad(s1/2.355), asec2rad(s2/2.355), th*np.pi/180.))
snrs = np.array(snrs)
ss = snrs.reshape(xx.shape)
return ss
def point(first, last=None, win=None, res=2., fwhm=11., channel='b'):
if last is None:
last = first
scans = range(first, last+1)
z = mfilt(scans)
if win is None:
win = np.ceil(rad2asec(np.abs(np.min(z.x))))
fitmodel(z, win=win, res=res, fwhm=fwhm, channel=channel)
if len(scans) == 1:
plt.title("%s: %d" % (z.source, scans[0]))
else:
plt.title("%s: [%d - %d]" % (z.source, scans[0], scans[-1]))
# general 2D Gaussian
def model2D(x, y, x0, y0, cov11, cov22, cov12):
invCov = 1.0/(cov11*cov22 - cov12**2) * np.array(( (cov22, -cov12), (-cov12, cov11) ) )
position = np.array( (x-x0, y-y0) )
m = np.exp(-0.5 * np.sum(position * np.dot(invCov, position), axis=0))
return m
def calcCov(sigma1, sigma2, angle):
vec = np.array( (np.cos(angle), np.sin(angle) ) ).T;
pvec = np.array( (-vec[1], vec[0]) );
eigvals = np.array( ( (sigma1, 0), (0, sigma2) ) )**2
eigvec = np.array( ( (vec[0], pvec[0]), (vec[1], pvec[1]) ) )
cov = np.dot(eigvec, np.dot(eigvals, eigvec.T) )
return cov
def ezmodel(x, y, x0, y0, sigma1, sigma2, angle):
cov = calcCov(sigma1, sigma2, angle)
return model2D(x, y, x0, y0, cov[0,0], cov[1,1], cov[1,0])
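# Quick check (illustrative): with equal sigmas the covariance is isotropic, so
# the position angle has no effect and the peak value is 1:
#   s = asec2rad(11.) / 2.355
#   ezmodel(0., 0., 0., 0., s, s, 0.) == ezmodel(0., 0., 0., 0., s, s, np.pi/3) == 1.0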
# def model2dplusring(x, y, x0, y0, cov11, cov22, cov12, ringsize, ringangle, ringrelativeAmplitude, radialprofile):
| mit |
aspera1631/TweetScore | length_test.py | 1 | 1607 | __author__ = 'bdeutsch'
import numpy as np
import pandas as pd
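# cartesian() builds the Cartesian product of the input 1-D arrays (a standard
# NumPy recipe), returning an array of shape (N, len(arrays)) with one row per
# combination of bin values.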
def cartesian(arrays, out=None):
arrays = [np.asarray(x) for x in arrays]
dtype = arrays[0].dtype
n = np.prod([x.size for x in arrays])
if out is None:
out = np.zeros([n, len(arrays)], dtype=dtype)
m = n / arrays[0].size
out[:,0] = np.repeat(arrays[0], m)
if arrays[1:]:
cartesian(arrays[1:], out=out[0:m,1:])
for j in xrange(1, arrays[0].size):
out[j*m:(j+1)*m,1:] = out[0:m,1:]
return out
emo_vals = range(0,7)
ht_vals = range(0,7)
media_vals = range(0,3)
txt_bas_vals = range(0,29)
url_vals = range(0,3)
user_vals = range(0,7)
'''
# generate the space of all possible tweets
emo_vals = range(0,2)
ht_vals = range(0,2)
media_vals = range(0,2)
txt_bas_vals = range(0,2)
url_vals = range(0,2)
user_vals = range(0,2)
'''
def get_txt_len(dfrow):
# weights represent number of characters per bin of each type of data
weight = pd.DataFrame([1, 2, 23, 5, 22, 2], index=["emo_num", "ht_num", "media_num", "txt_len_basic", "url_num", "user_num"])
len1 = dfrow.dot(weight)
return len1
# for each possible tweet, create a row of a dataframe
test = cartesian((emo_vals, ht_vals, media_vals, txt_bas_vals, url_vals, user_vals))
#test = [[141,0,0,0,0,0]]
# label the columns
tweetspace = pd.DataFrame(test, columns=["emo_num", "ht_num", "media_num", "txt_len_basic", "url_num", "user_num"])
tweetspace["len_tot"] = tweetspace.apply(get_txt_len, axis = 1)
legal_tweets = tweetspace[tweetspace["len_tot"] <= 140]
legal_tweets.to_pickle("legal_tweets_3")
| mit |
jreback/pandas | pandas/tests/util/test_deprecate_nonkeyword_arguments.py | 6 | 2713 | """
Tests for the `deprecate_nonkeyword_arguments` decorator
"""
import warnings
from pandas.util._decorators import deprecate_nonkeyword_arguments
import pandas._testing as tm
@deprecate_nonkeyword_arguments(version="1.1", allowed_args=["a", "b"])
def f(a, b=0, c=0, d=0):
return a + b + c + d
def test_one_argument():
with tm.assert_produces_warning(None):
assert f(19) == 19
def test_one_and_one_arguments():
with tm.assert_produces_warning(None):
assert f(19, d=6) == 25
def test_two_arguments():
with tm.assert_produces_warning(None):
assert f(1, 5) == 6
def test_two_and_two_arguments():
with tm.assert_produces_warning(None):
assert f(1, 3, c=3, d=5) == 12
def test_three_arguments():
with tm.assert_produces_warning(FutureWarning):
assert f(6, 3, 3) == 12
def test_four_arguments():
with tm.assert_produces_warning(FutureWarning):
assert f(1, 2, 3, 4) == 10
@deprecate_nonkeyword_arguments(version="1.1")
def g(a, b=0, c=0, d=0):
with tm.assert_produces_warning(None):
return a + b + c + d
def test_one_and_three_arguments_default_allowed_args():
with tm.assert_produces_warning(None):
assert g(1, b=3, c=3, d=5) == 12
def test_three_arguments_default_allowed_args():
with tm.assert_produces_warning(FutureWarning):
assert g(6, 3, 3) == 12
def test_three_positional_argument_with_warning_message_analysis():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert g(6, 3, 3) == 12
assert len(w) == 1
for actual_warning in w:
assert actual_warning.category == FutureWarning
assert str(actual_warning.message) == (
"Starting with Pandas version 1.1 all arguments of g "
"except for the argument 'a' will be keyword-only"
)
@deprecate_nonkeyword_arguments(version="1.1")
def h(a=0, b=0, c=0, d=0):
return a + b + c + d
def test_all_keyword_arguments():
with tm.assert_produces_warning(None):
assert h(a=1, b=2) == 3
def test_one_positional_argument():
with tm.assert_produces_warning(FutureWarning):
assert h(23) == 23
def test_one_positional_argument_with_warning_message_analysis():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert h(19) == 19
assert len(w) == 1
for actual_warning in w:
assert actual_warning.category == FutureWarning
assert str(actual_warning.message) == (
"Starting with Pandas version 1.1 all arguments "
"of h will be keyword-only"
)
| bsd-3-clause |
peerdavid/social-neural-network | neural_network/write_experience.py | 1 | 3920 | import sys
import traceback
import numpy as np
import tensorflow as tf
from sklearn import metrics
import params
import utils
import data_input
FLAGS = params.FLAGS
def _log_all_wrong_predictions(sess, prediction, labels_pl, images_pl, dropout_pl, dataset):
# Calculate predictions of trained network
y_true = []
y_pred = []
invalid_images = []
for i in range(len(dataset.image_list)):
feed_dict = utils.create_feed_data(sess, images_pl, labels_pl, dropout_pl, dataset, 1.0)
y_true.extend(feed_dict[labels_pl])
y_pred.extend(sess.run([prediction], feed_dict=feed_dict)[0])
if(y_true[i] != y_pred[i]):
invalid_images.append(dataset.image_list[i])
sys.stdout.write(" Calculating predictions ... %d%%\r" % (i * 100 / len(dataset.image_list)))
sys.stdout.flush()
sys.stdout.write(" \r")
sys.stdout.flush()
# Append experience to file
experience_file = FLAGS.generation_experience_file.format(FLAGS.generation)
with open(experience_file, 'a') as file_handler:
file_handler.write("\n".join(invalid_images))
file_handler.write("\n")
return
def _append_experience():
try:
# Create a session for running Ops on the Graph.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Check every image, so set batch_size to one
FLAGS.batch_size = 1
# We only write down experience on not trained data. Therefore we use the validation set
train_file = FLAGS.generation_train_file.format(FLAGS.generation, FLAGS.cross_validation_iteration)
with open(train_file, 'r') as file_handler:
train_images = [line.rstrip('\n') for line in file_handler]
dataset = data_input.read_image_batches_with_labels_in_blacklist_from_path(FLAGS, FLAGS.train_dir, train_images)
# Inference model
print("Restore graph from meta file {0}.meta".format(FLAGS.checkpoint))
saver = tf.train.import_meta_graph("{0}.meta".format(FLAGS.checkpoint))
print("\nRestore session from checkpoint {0}".format(FLAGS.checkpoint))
saver.restore(sess, FLAGS.checkpoint)
graph = tf.get_default_graph()
# Load model and placeholder
logits = tf.get_collection('logits')[0]
images_pl = graph.get_tensor_by_name('images_pl:0')
labels_pl = graph.get_tensor_by_name('labels_pl:0')
dropout_pl = graph.get_tensor_by_name('dropout_pl:0')
# Prediction used for confusion matrix
prediction = tf.argmax(logits, 1)
try:
# Start the queue runners.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Write experience
print("\n# Writing experience file for gen. {0} iteration {1} ".format(
FLAGS.generation,
FLAGS.cross_validation_iteration
))
_log_all_wrong_predictions(sess, prediction, labels_pl, images_pl, dropout_pl, dataset)
finally:
input("Press any key to finish...")
print("\nWaiting for all threads...")
coord.request_stop()
coord.join(threads)
except:
traceback.print_exc()
finally:
print("\nDone.\n")
#
# M A I N
#
def main(argv=None):
if(len(argv) > 1):
FLAGS.generation = int(argv[1])
FLAGS.cross_validation_iteration = int(argv[2])
FLAGS.checkpoint = FLAGS.generation_checkpoint.format(FLAGS.generation, FLAGS.cross_validation_iteration)
_append_experience()
if __name__ == '__main__':
    tf.app.run()
| gpl-3.0 |
sugartom/tensorflow-alien | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 8 | 3794 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn # pylint: disable=unused-import
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
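# Minimal usage sketch, kept under a __main__ guard so importing this module
# is unaffected. The frames below are hypothetical data, assuming pandas is
# installed: int/float/bool columns are cast to a float ndarray, labels must
# be a single column, and unsupported dtypes raise ValueError.
if __name__ == '__main__' and HAS_PANDAS:
  _features = pd.DataFrame({'a': [1, 2, 3], 'b': [0.5, 1.5, 2.5]})
  _labels = pd.DataFrame({'y': [0, 1, 0]})
  print(extract_pandas_data(_features))   # float ndarray of shape (3, 2)
  print(extract_pandas_labels(_labels))   # label values of shape (3, 1)
  try:
    extract_pandas_data(pd.DataFrame({'s': ['x', 'y', 'z']}))
  except ValueError as err:
    print(err)  # object dtype columns are rejected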
| apache-2.0 |
jhprinz/openpathsampling | openpathsampling/analysis/tis/flux.py | 1 | 12156 | import collections
import openpathsampling as paths
from openpathsampling.netcdfplus import StorableNamedObject
import pandas as pd
import numpy as np
from .core import MultiEnsembleSamplingAnalyzer
class MinusMoveFlux(MultiEnsembleSamplingAnalyzer):
"""
Calculating the flux from the minus move.
Raises
------
ValueError
if the number of interface sets per minus move is greater than one.
Cannot use Minus Move flux calculation with multiple interface set
TIS.
Parameters
----------
scheme: :class:`.MoveScheme`
move scheme that was used (includes information on the minus movers
and on the network)
flux_pairs: list of 2-tuple of :class:`.Volume`
pairs of (state, interface) for calculating the flux out of the
volume and through the state. Default is `None`, in which case the
state and innermost interface are used.
"""
def __init__(self, scheme, flux_pairs=None):
super(MinusMoveFlux, self).__init__()
# error string we'll re-use in a few places
mistis_err_str = ("Cannot use minus move flux with multiple "
+ "interface sets. ")
self.scheme = scheme
self.network = scheme.network
self.minus_movers = scheme.movers['minus']
for mover in self.minus_movers:
n_innermost = len(mover.innermost_ensembles)
if n_innermost != 1:
raise ValueError(
mistis_err_str + "Mover " + str(mover) + " does not "
+ "have exactly one innermost ensemble. Found "
+ str(len(mover.innermost_ensembles)) + ")."
)
if flux_pairs is None:
# get flux_pairs from network
flux_pairs = []
minus_ens_to_trans = self.network.special_ensembles['minus']
for minus_ens in self.network.minus_ensembles:
n_trans = len(minus_ens_to_trans[minus_ens])
if n_trans > 1: # pragma: no cover
                    # Should have been caught by the previous ValueError. If
# you hit this, something unexpected happened.
raise ValueError(mistis_err_str + "Ensemble "
+ repr(minus_ens) + " connects "
+ str(n_trans) + " transitions.")
trans = minus_ens_to_trans[minus_ens][0]
innermost = trans.interfaces[0]
state = trans.stateA
# a couple assertions as a sanity check
assert minus_ens.state_vol == state
assert minus_ens.innermost_vol == innermost
flux_pairs.append((state, innermost))
self.flux_pairs = flux_pairs
def _get_minus_steps(self, steps):
"""
Selects steps that used this object's minus movers
"""
return [s for s in steps
if s.change.canonical.mover in self.minus_movers
and s.change.accepted]
def trajectory_transition_flux_dict(self, minus_steps):
"""
Main minus move-based flux analysis routine.
Parameters
----------
minus_steps: list of :class:`.MCStep`
steps that used the minus movers
Returns
-------
        dict of {(:class:`.Volume`, :class:`.Volume`): dict}
            keys are (state, interface); values are the result dict from
            :method:`.TrajectoryTransitionAnalysis.analyze_flux` (keys are
            strings 'in' and 'out', mapping to
            :class:`.TrajectorySegmentContainer` with appropriate frames).
"""
# set up a few mappings that make it easier set up other things
flux_pair_to_transition = {
(trans.stateA, trans.interfaces[0]): trans
for trans in self.network.sampling_transitions
}
flux_pair_to_minus_mover = {
(m.minus_ensemble.state_vol, m.minus_ensemble.innermost_vol): m
for m in self.minus_movers
}
minus_mover_to_flux_pair = {flux_pair_to_minus_mover[k]: k
for k in flux_pair_to_minus_mover}
flux_pair_to_minus_ensemble = {
(minus_ens.state_vol, minus_ens.innermost_vol): minus_ens
for minus_ens in self.network.minus_ensembles
}
# sanity checks -- only run once per analysis, so keep them in
for pair in self.flux_pairs:
assert pair in flux_pair_to_transition.keys()
assert pair in flux_pair_to_minus_mover.keys()
assert len(self.flux_pairs) == len(minus_mover_to_flux_pair)
# organize the steps by mover used
mover_to_steps = collections.defaultdict(list)
for step in minus_steps:
mover_to_steps[step.change.canonical.mover].append(step)
# create the actual TrajectoryTransitionAnalysis objects to use
transition_flux_calculators = {
k: paths.TrajectoryTransitionAnalysis(
transition=flux_pair_to_transition[k],
dt=flux_pair_to_minus_mover[k].engine.snapshot_timestep
)
for k in self.flux_pairs
}
# do the analysis
results = {}
for flux_pair in self.flux_pairs:
(state, innermost) = flux_pair
mover = flux_pair_to_minus_mover[flux_pair]
calculator = transition_flux_calculators[flux_pair]
minus_ens = flux_pair_to_minus_ensemble[flux_pair]
# TODO: this won't work for SR minus, I don't think
# (but neither would our old version)
trajectories = [s.active[minus_ens].trajectory
for s in mover_to_steps[mover]]
results[flux_pair] = calculator.analyze_flux(
trajectories=trajectories,
state=state,
interface=innermost
)
return results
@staticmethod
def from_trajectory_transition_flux_dict(flux_dicts):
"""Load from existing TrajectoryTransitionAnalysis calculations.
Parameters
----------
flux_dicts: dict of {(:class:`.Volume`, :class:`.Volume`): dict}
keys are (state, interface); values are the result dict from
:method:`.TrajectoryTransitionAnalysis.analyze_flux` (keys are
strings 'in' and 'out', mapping to
            :class:`.TrajectorySegmentContainer` with appropriate frames).
Returns
-------
        dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
TTA = paths.TrajectoryTransitionAnalysis # readability on 80 col
return {k: TTA.flux_from_flux_dict(flux_dicts[k])
for k in flux_dicts}
def from_weighted_trajectories(self, input_dict):
"""Not implemented for flux calculation."""
# this can't be done, e.g., in the case of the single replica minus
# mover, where the minus trajectory isn't in the active samples
raise NotImplementedError(
"Can not calculate minus move from weighted trajectories."
)
def calculate(self, steps):
"""Perform the analysis, using `steps` as input.
Parameters
----------
steps : iterable of :class:`.MCStep`
the steps to use as input for this analysis
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
intermediates = self.intermediates(steps)
return self.calculate_from_intermediates(*intermediates)
def intermediates(self, steps):
"""Calculate intermediates, using `steps` as input.
Parameters
----------
steps : iterable of :class:`.MCStep`
the steps to use as input for this analysis
Returns
-------
list (len 1) of dict of {(:class:`.Volume`, :class:`.Volume`): dict}
keys are (state, interface); values are the result dict from
:method:`.TrajectoryTransitionAnalysis.analyze_flux` (keys are
strings 'in' and 'out', mapping to
            :class:`.TrajectorySegmentContainer` with appropriate frames).
"""
minus_steps = self._get_minus_steps(steps)
return [self.trajectory_transition_flux_dict(minus_steps)]
def calculate_from_intermediates(self, *intermediates):
"""Perform the analysis, using intermediates as input.
Parameters
----------
intermediates :
output of :method:`.intermediates`
Returns
-------
        dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
flux_dicts = intermediates[0]
return self.from_trajectory_transition_flux_dict(flux_dicts)
class DictFlux(MultiEnsembleSamplingAnalyzer):
"""Pre-calculated flux, provided as a dict.
Parameters
----------
flux_dict: dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface) pairs; values are associated flux
"""
def __init__(self, flux_dict):
super(DictFlux, self).__init__()
self.flux_dict = flux_dict
def calculate(self, steps):
"""Perform the analysis, using `steps` as input.
Parameters
----------
steps : iterable of :class:`.MCStep`
the steps to use as input for this analysis
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
return self.flux_dict
def from_weighted_trajectories(self, input_dict):
"""Calculate results from weighted trajectories dictionary.
For :class:`.DictFlux`, this ignores the input.
Parameters
----------
input_dict : dict of {:class:`.Ensemble`: collections.Counter}
ensemble as key, and a counter mapping each trajectory
associated with that ensemble to its counter of time spent in
the ensemble.
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
return self.flux_dict
def intermediates(self, steps):
"""Calculate intermediates, using `steps` as input.
Parameters
----------
steps : iterable of :class:`.MCStep`
the steps to use as input for this analysis
Returns
-------
list
empty list; the method is a placeholder for this class
"""
return []
def calculate_from_intermediates(self, *intermediates):
"""Perform the analysis, using intermediates as input.
Parameters
----------
intermediates :
output of :method:`.intermediates`
Returns
-------
        dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
return self.flux_dict
@staticmethod
def combine_results(result_1, result_2):
"""Combine two sets of results from this analysis.
For :class:`.DictFlux`, the results must be identical.
Parameters
----------
        result_1 : dict of {(:class:`.Volume`, :class:`.Volume`): float}
            first set of results from a flux calculation
        result_2 : dict of {(:class:`.Volume`, :class:`.Volume`): float}
            second set of results from a flux calculation
        Returns
        -------
        dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
if result_1 != result_2:
raise RuntimeError("Combining results from different DictFlux")
return result_1
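# Minimal usage sketch, kept under a __main__ guard so importing this module
# is unaffected. DictFlux hands back whatever flux dictionary it was
# constructed with, regardless of the steps it is given. The keys below are
# plain-string stand-ins for (state Volume, interface Volume) pairs and the
# flux value is hypothetical.
if __name__ == '__main__':
    _fluxes = {('stateA', 'interface_0'): 0.025}
    _flux_calc = DictFlux(_fluxes)
    assert _flux_calc.calculate(steps=[]) == _fluxes
    assert _flux_calc.intermediates(steps=[]) == []
    assert DictFlux.combine_results(_fluxes, _fluxes) == _fluxes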
| lgpl-2.1 |
LACNIC/labs-opendata-datasets | bin/venn_asociados.py | 1 | 2990 | # -*- coding: utf-8 -*-
"""
venn_asociados: calculates and plots a venn diagram showing lacnic member groupings
(c) Carlos M:, [email protected]
"""
import sqlite3 as sq
import os
import matplotlib_venn as mpv
import json
from matplotlib import pyplot as plt
if os.name == "nt":
os.chdir(r"C:\Users\carlos\Dropbox (Personal)\Workspaces\LACNIC-Wksp\70-checkouts\labs-opendata-datasets.git")
elif os.name == "posix":
os.chdir("/Users/carlos/Dropbox (Personal)/Workspaces/LACNIC-Wksp/70-checkouts/labs-opendata-datasets.git")
else:
print("WARN unable to detect operating system")
def runq(type,rir='lacnic'):
# TODO: parametrize date
c=sq.connect("var/netdata-2017-08-23.db")
cur=c.cursor()
r1=cur.execute("select distinct(orgid) from numres where rir='%s' and type='%s' and (status='allocated' or status='assigned')"
% (rir, type) )
rcorgs=[ x[0] for x in r1.fetchall() ]
return rcorgs
# end runq
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
# end default
# end SetEncoder
if __name__ == "__main__":
rsets = { 'ipv4':0, 'ipv6':0, 'asn':0 }
for rc in rsets.keys():
print("get orgs with assigned %s" % (rc) )
rsets[rc] = set(runq(rc))
print("%s: %s" % (rc, len(rsets[rc])) )
# end for
ipv4nipv6 = rsets['ipv4'] & rsets['ipv6']
# print("orgs with both ipv4 and ipv6: %s" % (len(ipv4nipv6) ) )
ipv6only = rsets['ipv6'] - rsets['ipv4']
#print("orgs with ipv6 only: %s" % (len(ipv6only) ) )
s5 = rsets['ipv4'] & rsets['ipv6'] & rsets['asn']
rsets['Orgs with ipv4, ipv6 and asn'] = s5
s2 = (rsets['ipv4'] & rsets['ipv6']) - s5
rsets['Orgs with ipv4 and ipv6 but no asn'] = s2
s4 = (rsets['ipv4'] & rsets['asn']) - s5
rsets['Orgs with ipv4 and asn but no ipv6'] = s4
s6 = (rsets['ipv6'] & rsets['asn']) - s5
rsets['Orgs with ipv6 and asn but no ipv4'] = s6
s1 = rsets['ipv4'] - s4 -s5 - s2
rsets['Orgs with ipv4 only'] = s1
s3 = rsets['ipv6'] - s6 - s5 - s2
rsets['Orgs with ipv6 only'] = s3
s7 = rsets['asn'] - s5 - s4 - s6
rsets['Orgs with asn only'] = s7
# mpv.venn3(
# subsets = (len(s1),len(s2),len(s3),len(s4),len(s5),len(s6),len(s7) ),
# set_labels = ('ipv4','asn','ipv6' )
# )
# print text info
for rc in rsets.keys():
print("%s: %s" % (rc,len(rsets[rc])) )
# draw plot
plt.figure(figsize=(10,10))
plt.title("Asociados LACNIC", fontsize=24)
mpv.venn3([rsets['ipv4'], rsets['ipv6'], rsets['asn']], ('IPv4', 'IPv6', 'ASN'))
plt.show()
# print json
fp = open("var/venn-asociados-20170823.json", "w")
# fp = io.FileIO("var/venn-asociados-20170815.json","w")
json.dump(rsets, fp, cls=SetEncoder, indent=4)
fp.close() | bsd-2-clause |
fredRos/pypmc | setup.py | 1 | 3257 | from __future__ import print_function
# bootstrap: download setuptools 3.3 if needed
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages, Extension
import os
import sys
package_name = 'pypmc'
# set the version number
with open('pypmc/_version.py') as f:
exec(f.read())
def get_extensions():
import numpy
extra_compile_args=["-Wno-unused-but-set-variable",
"-Wno-unused-function",
"-O3"]
include_dirs = [numpy.get_include()]
from Cython.Build import cythonize
extensions = [Extension('*', ['pypmc/*/*.pyx'],
extra_compile_args=extra_compile_args,
include_dirs=include_dirs)]
compiler_directives = dict(boundscheck=False, cdivision=True,
embedsignature=True,
profile=False, wraparound=False,
# needed to make cython>0.29 happy
language_level=2)
ext_modules = cythonize(extensions, compiler_directives=compiler_directives)
return ext_modules
def setup_package():
# the long description is unavailable in a source distribution and not essential to build
try:
with open('doc/abstract.txt') as f:
long_description = f.read()
except:
long_description = ''
setup_args = dict(
name=package_name,
packages=find_packages(),
version=__version__,
author='Frederik Beaujean, Stephan Jahn',
author_email='[email protected], [email protected]',
url='https://github.com/fredRos/pypmc',
description='A toolkit for adaptive importance sampling featuring implementations of variational Bayes, population Monte Carlo, and Markov chains.',
long_description=long_description,
license='GPLv2',
install_requires=['numpy>=1.6, <2.0', 'scipy'],
extras_require={'testing': ['nose'], 'plotting': ['matplotlib'], 'parallelization': ['mpi4py']},
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Operating System :: Unix',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
],
platforms=['Unix'],
)
if len(sys.argv) >= 2 and (
'--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands', 'egg_info', '--version',
'clean')):
# For these actions, dependencies are not required.
pass
else:
setup_args['packages'] = find_packages()
setup_args['ext_modules'] = get_extensions()
setup(**setup_args)
if __name__ == '__main__':
setup_package()
| gpl-2.0 |
q1ang/scikit-learn | sklearn/semi_supervised/label_propagation.py | 71 | 15342 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
    Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse = ['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
    Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
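# Usage sketch on made-up synthetic data, kept under a __main__ guard so
# importing this module is unaffected: compare hard-clamped LabelPropagation
# with the soft-clamped, graph-Laplacian-based LabelSpreading. Points marked
# -1 are treated as unlabeled and receive labels via transduction.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.rand(60, 2)
    y_demo = (X_demo[:, 0] > 0.5).astype(int)
    y_demo[rng.rand(60) < 0.5] = -1  # hide roughly half of the labels
    for model in (LabelPropagation(kernel='knn', n_neighbors=7),
                  LabelSpreading(kernel='knn', n_neighbors=7, alpha=0.2)):
        model.fit(X_demo, y_demo)
        print("%s %s" % (model.__class__.__name__, model.transduction_[:10]))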
| bsd-3-clause |
yarikoptic/pystatsmodels | statsmodels/sandbox/examples/try_smoothers.py | 3 | 2642 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 01 15:17:52 2011
Author: Mike
Author: Josef
mainly script for checking Kernel Regression
"""
import numpy as np
if __name__ == "__main__":
#from statsmodels.sandbox.nonparametric import smoothers as s
from statsmodels.sandbox.nonparametric import smoothers, kernels
import matplotlib.pyplot as plt
#from numpy import sin, array, random
import time
np.random.seed(500)
nobs = 250
sig_fac = 0.5
#x = np.random.normal(size=nobs)
x = np.random.uniform(-2, 2, size=nobs)
#y = np.array([np.sin(i*5)/i + 2*i + (3+i)*np.random.normal() for i in x])
y = np.sin(x*5)/x + 2*x + sig_fac * (3+x)*np.random.normal(size=nobs)
K = kernels.Biweight(0.25)
K2 = kernels.CustomKernel(lambda x: (1 - x*x)**2, 0.25, domain = [-1.0,
1.0])
KS = smoothers.KernelSmoother(x, y, K)
KS2 = smoothers.KernelSmoother(x, y, K2)
KSx = np.arange(-3, 3, 0.1)
start = time.time()
KSy = KS.conf(KSx)
KVar = KS.std(KSx)
print time.time() - start # This should be significantly quicker...
start = time.time() #
KS2y = KS2.conf(KSx) #
K2Var = KS2.std(KSx) #
print time.time() - start # ...than this.
KSConfIntx, KSConfInty = KS.conf(15)
print "Norm const should be 0.9375"
print K2.norm_const
print "L2 Norms Should Match:"
print K.L2Norm
print K2.L2Norm
print "Fit values should match:"
#print zip(KSy, KS2y)
print KSy[28]
print KS2y[28]
print "Var values should match:"
#print zip(KVar, K2Var)
print KVar[39]
print K2Var[39]
fig = plt.figure()
ax = fig.add_subplot(221)
ax.plot(x, y, "+")
ax.plot(KSx, KSy, "-o")
#ax.set_ylim(-20, 30)
ax2 = fig.add_subplot(222)
ax2.plot(KSx, KVar, "-o")
ax3 = fig.add_subplot(223)
ax3.plot(x, y, "+")
ax3.plot(KSx, KS2y, "-o")
#ax3.set_ylim(-20, 30)
ax4 = fig.add_subplot(224)
ax4.plot(KSx, K2Var, "-o")
fig2 = plt.figure()
ax5 = fig2.add_subplot(111)
ax5.plot(x, y, "+")
ax5.plot(KSConfIntx, KSConfInty, "-o")
import statsmodels.nonparametric.smoothers_lowess as lo
ys = lo.lowess(y, x)
ax5.plot(ys[:,0], ys[:,1], 'b-')
ys2 = lo.lowess(y, x, frac=0.25)
ax5.plot(ys2[:,0], ys2[:,1], 'b--', lw=2)
    # need to sort for matplotlib plot?
xind = np.argsort(x)
pmod = smoothers.PolySmoother(5, x[xind])
pmod.fit(y[xind])
yp = pmod(x[xind])
ax5.plot(x[xind], yp, 'k-')
ax5.set_title('Kernel regression, lowess - blue, polysmooth - black')
#plt.show()
| bsd-3-clause |
VirtualWatershed/vw-py | vwpy/isnobal.py | 2 | 50849 | """
Tools for working with IPW binary data and running the iSNOBAL model.
"""
#
# Copyright (c) 2014, Matthew Turner (maturner01.gmail.com)
#
# For the Tri-state EPSCoR Track II WC-WAVE Project
#
# Acknowledgements to Robert Lew for inspiration in the design of the IPW
# class (see https://github.com/rogerlew/RL_GIS_Sandbox/tree/master/isnobal).
#
import datetime
import logging
import subprocess
import netCDF4
import re
import warnings
import xray
from collections import namedtuple, defaultdict
from copy import deepcopy
from netCDF4 import Dataset
from numpy import (arange, array, zeros, ravel, reshape, fromstring, dtype,
floor, log10)
from numpy import sum as npsum
from numpy import round as npround
from numpy.ma import masked
from os import mkdir, listdir
from os.path import exists, dirname, basename
from os.path import join as osjoin
from pandas import date_range, DataFrame, Series, Timedelta
from progressbar import ProgressBar
from shutil import rmtree
from struct import pack
from .watershed import make_fgdc_metadata, make_watershed_metadata
from .netcdf import ncgen_from_template, utm2latlon
#: IPW standard. assumed unchanging since they've been the same for 20 years
BAND_TYPE_LOC = 1
BAND_INDEX_LOC = 2
#: For converting NetCDF to iSNOBAL, use 2 bytes for all variables except mask
NC_NBYTES = 2
NC_NBITS = 16
NC_MAXINT = pow(2, NC_NBITS) - 1
#: Container for ISNOBAL Global Band information
GlobalBand = namedtuple("GlobalBand", 'byteorder nLines nSamps nBands')
#: Check if a header is starting
IsHeaderStart = lambda headerLine: headerLine.split()[0] == "!<header>"
def AssertISNOBALInput(nc):
"""Check if a NetCDF conforms to iSNOBAL requirements for running that
model. Throw a ISNOBALNetcdfError if not
"""
if type(nc) is xray.Dataset:
nca = nc.attrs
elif type(nc) is netCDF4.Dataset:
nca = nc.ncattrs()
else:
raise Exception('NetCDF is not a valid type')
valid = ('data_tstep' in nca and 'nsteps' in nca and
'output_frequency' in nca)
if not valid:
raise ISNOBALNetcdfError("Attributes 'data_tstep', 'nsteps', "
"'output_frequency', 'bline', 'bsamp', "
"'dline', and 'dsamp' not all in NetCDF")
ncv = nc.variables
expected_variables = ['alt', 'mask', 'time', 'easting', 'northing', 'lat',
'lon', 'I_lw', 'T_a', 'e_a', 'u', 'T_g', 'S_n', 'z',
'z_0', 'z_s', 'rho', 'T_s_0', 'T_s', 'h2o_sat',
'm_pp', 'percent_snow', 'rho_snow']
not_present = []
for exp_var in expected_variables:
if exp_var not in ncv:
not_present += [exp_var]
if not_present:
raise ISNOBALNetcdfError(
"Variables " + ', '.join(not_present) +
" are missing from input NetCDF")
#: varnames for loading the NetCDF
VARNAME_BY_FILETYPE = \
{
'dem': ['alt'],
'in': ['I_lw', 'T_a', 'e_a', 'u', 'T_g', 'S_n'],
'precip': ['m_pp', 'percent_snow', 'rho_snow', 'T_pp'],
'mask': ['mask'],
'init': ['z', 'z_0', 'z_s', 'rho', 'T_s_0', 'T_s', 'h2o_sat'],
'em': ['R_n', 'H', 'L_v_E', 'G', 'M', 'delta_Q',
'E_s', 'melt', 'ro_predict', 'cc_s'],
'snow': ['z_s', 'rho', 'm_s', 'h2o', 'T_s_0',
'T_s_l', 'T_s', 'z_s_l', 'h2o_sat']
}
#: ISNOBAL variable names to be looked up to make dataframes and write metadata
#: Convert number of bytes to struct package code for unsigned integer type
PACK_DICT = \
{
1: 'B',
2: 'H',
4: 'I'
}
def isnobal(nc_in=None, nc_out_fname=None, data_tstep=60, nsteps=8758,
init_img="data/init.ipw", precip_file="data/ppt_desc",
mask_file="data/tl2p5mask.ipw", input_prefix="data/inputs/in",
output_frequency=1, em_prefix="data/outputs/em",
snow_prefix="data/outputs/snow", dt='hours', year=2010,
month=10, day='01', event_emitter=None, **kwargs):
""" Wrapper for running the ISNOBAL
(http://cgiss.boisestate.edu/~hpm/software/IPW/man1/isnobal.html)
model.
Arguments:
nc_in (netCDF4.Dataset) Input NetCDF4 dataset. See
AssertISNOBALInput for requirements.
nc_out_fname (str) Name of NetCDF file to write to, if desired
        For explanations of the rest, see the link above.
    ** Addition: It expects a pyee event emitter in order to emit progress messages; if not provided, it should still work fine.
Returns:
(netCDF4.Dataset) NetCDF Dataset object of the outputs
"""
if not nc_in:
isnobalcmd = " ".join(["isnobal",
"-t " + str(data_tstep),
"-n " + str(nsteps),
"-I " + init_img,
"-p " + precip_file,
"-m " + mask_file,
"-i " + input_prefix,
"-O " + str(output_frequency),
"-e " + em_prefix,
"-s " + snow_prefix])
# TODO sanitize this isnobalcmd or better yet, avoid shell=True
logging.debug('Running isnobal')
kwargs['event_name'] = 'running_isonbal'
kwargs['event_description'] = 'Running the ISNOBAL model'
kwargs['progress_value'] = 50
if event_emitter:
event_emitter.emit('progress', **kwargs)
output = subprocess.check_output(isnobalcmd, shell=True)
logging.debug("ISNOBAL process output: " + output)
        logging.debug('done running isnobal')
kwargs['event_name'] = 'running_isonbal'
kwargs['event_description'] = 'Done Running model'
kwargs['progress_value'] = 100
if event_emitter:
event_emitter.emit('progress',**kwargs)
# create a NetCDF of the outputs and return it
nc_out = \
generate_standard_nc(dirname(em_prefix), nc_out_fname,
data_tstep=data_tstep,
output_frequency=output_frequency, dt=dt,
year=year, month=month, day=day,
event_emitter=event_emitter, **kwargs)
return nc_out
else:
AssertISNOBALInput(nc_in)
# these are guaranteed to be present by the above assertion
data_tstep = nc_in.data_tstep
nsteps = nc_in.nsteps - 1 # isnobal steps are from one step to another
output_frequency = nc_in.output_frequency
# create standard IPW data in tmpdir; creates tmpdir
tmpdir = '/tmp/isnobalrun' + \
str(datetime.datetime.now()).replace(' ', '')
nc_to_standard_ipw(nc_in, tmpdir,event_emitter=event_emitter,**kwargs)
mkdir(osjoin(tmpdir, 'outputs'))
# nc_to_standard_ipw is well tested, we know these will be present
init_img = osjoin(tmpdir, 'init.ipw')
mask_file = osjoin(tmpdir, 'mask.ipw')
precip_file = osjoin(tmpdir, 'ppt_desc')
em_prefix = osjoin(tmpdir, 'outputs/em')
input_prefix = osjoin(tmpdir, 'inputs/in')
snow_prefix = osjoin(tmpdir, 'outputs/snow')
# recursively run isnobal with nc_in=None
nc_out = isnobal(nc_out_fname=nc_out_fname, data_tstep=data_tstep,
nsteps=nsteps, init_img=init_img,
precip_file=precip_file, mask_file=mask_file,
input_prefix=input_prefix,
output_frequency=output_frequency,
em_prefix=em_prefix, snow_prefix=snow_prefix,event_emitter=event_emitter,**kwargs)
rmtree(tmpdir)
return nc_out
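def _example_isnobal_run():
    """Illustrative sketch only: run iSNOBAL directly from an input NetCDF.
    The file names are hypothetical placeholders; the input NetCDF must
    satisfy AssertISNOBALInput.
    """
    nc_in = Dataset('isnobal_inputs.nc', 'r')  # hypothetical input file
    nc_out = isnobal(nc_in=nc_in, nc_out_fname='isnobal_outputs.nc')
    return nc_out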
class IPW(object):
"""
Represents an IPW file. Provides a data_frame attribute to access the
variables and their floating point representation as a dataframe. The
dataframe can be modified, the headers recalculated with
recalculateHeaders, and then written back to IPW binary with
writeBinary.
>>> ipw = IPW("in.0000")
>>> ipw.data_frame.T_a = ipw.data_frame.T_a + 1.0 # add 1 dg C to each temp
>>> ipw.writeBinary("in.plusOne.000")
"""
def __init__(self, input_file=None, config_file=None,
water_year=None, dt=None, file_type=None):
assert dt is None or issubclass(type(dt), datetime.timedelta)
if input_file is not None:
ipw_lines = IPWLines(input_file)
input_split = basename(input_file).split('.')
file_type = file_type or input_split[0]
# _make_bands
try:
header_dict = \
_make_bands(ipw_lines.header_lines,
VARNAME_BY_FILETYPE[file_type])
except (KeyError):
raise IPWFileError("Provide explicit file type for file %s" %
input_file)
# extract just bands from the header dictionary
bands = [band for band in header_dict.values()]
# get the nonglobal_bands in a list, ordered by band index
nonglobal_bands =\
sorted([band for varname, band in header_dict.iteritems()
if varname != 'global'],
key=lambda b: b.band_idx)
# the default configuration is used if no config file is given
if config_file is None:
config_file = \
osjoin(dirname(__file__), '../default.conf')
if file_type in ['in', 'em', 'snow']:
# set the water year to default if not given
if not water_year:
water_year = 2010
# note that we have not generalized for non-hour timestep data
if dt is None:
dt = Timedelta('1 hour')
# the iSNOBAL file naming scheme puts the integer time step
# after the dot, really as the extension
# TODO as Roger pointed out, really this is for
# a single point in time, so this timing thing is not right
start_dt = dt * int(input_split[-1])
start_datetime = \
datetime.datetime(water_year, 10, 01) + start_dt
end_datetime = start_datetime + dt
else:
start_datetime = None
end_datetime = None
# initialized when called for below
self._data_frame = None
self.input_file = input_file
self.file_type = file_type
self.header_dict = header_dict
self.binary_data = ipw_lines.binary_data
self.bands = bands
self.nonglobal_bands = nonglobal_bands
# use geo information in band0; all bands have equiv geo info
band0 = nonglobal_bands[0]
self.geotransform = [band0.bsamp - band0.dsamp / 2.0,
band0.dsamp,
0.0,
band0.bline - band0.dline / 2.0,
0.0,
band0.dline]
self.config_file = config_file
self.start_datetime = start_datetime
self.end_datetime = end_datetime
else:
self._data_frame = None
self.input_file = None
self.file_type = None
self.header_dict = None
self.binary_data = None
self.bands = None
self.nonglobal_bands = None
self.geotransform = None
self.start_datetime = None
self.end_datetime = None
return None
def recalculate_header(self):
"""
Recalculate header values
"""
_recalculate_header(self.nonglobal_bands, self.data_frame())
for band in self.nonglobal_bands:
self.header_dict[band.varname] = band
@classmethod
def precip_tuple(self, precip_file, sepchar='\t'):
"""Create list of two-lists where each element's elements are the time
index of the time step when the precipitation happened and an IPW
of the precipitation data.
"""
pptlist = map(lambda l: l.strip().split(sepchar),
open(precip_file, 'r').readlines())
return map(lambda l: (l[0], IPW(l[1], file_type='precip')), pptlist)
@classmethod
def from_nc(cls, nc_in, tstep=None, file_type=None, variable=None,
distance_units='m', coord_sys_ID='UTM'):
"""
Generate an IPW object from a NetCDF file.
>>> ipw = IPW.from_nc('dataset.nc', tstep='1', file_type='in')
>>> ipw = IPW.from_nc(nc_in)
If your data uses units of distance other than meters, set that
with kwarg `distance_units`. Simliar
Arguments:
nc_in (str or NetCDF4.Dataset) NetCDF to convert to IPW
tstep (int) The time step in whatever units are being used
file_type (str) file type of NetCDF variable, one of
'in', 'precip', 'em', 'snow', 'mask', 'init', 'dem'
variable (str or list) One or many variable names to be
incorporated into IPW file
distance_units (str) If you use a measure of distance other
than meters, put the units here
coord_sys_ID (str) Coordinate system being used
Returns:
(IPW) IPW instance built from NetCDF inputs
"""
if type(nc_in) is str:
nc_in = Dataset(nc_in, 'r')
# check and get variables from netcdf
if file_type is None and variable is None:
raise Exception("file_type and variable both 'None': no data to convert!")
# initialize the IPW and set its some global attributes
ipw = IPW()
if file_type is None:
if variable == 'alt':
ipw.file_type = 'dem'
elif variable == 'mask':
ipw.file_type = variable
# this allows same lookup to be used for init or dem/mask
nc_vars = {variable: nc_in.variables[variable]}
else:
nc_vars = {varname: nc_in.variables[varname]
for varname in VARNAME_BY_FILETYPE[file_type]}
ipw.file_type = file_type
# read header info from nc and generate/assign to new IPW
# build global dict
ipw.byteorder = '0123' # TODO read from file
ipw.nlines = len(nc_in.dimensions['northing'])
ipw.nsamps = len(nc_in.dimensions['easting'])
# if the bands are not part of a group, they are handled individually
if file_type:
ipw.nbands = len(nc_vars)
else:
ipw.nbands = 1
globalBand = GlobalBand(ipw.byteorder, ipw.nlines,
ipw.nsamps, ipw.nbands)
# build non-global band(s). Can use recalculate_header so no min/max
# need be set.
# setting all values common to all bands
# use 2 bytes/16 bits for floating point values
bytes_ = NC_NBYTES
bits_ = NC_NBITS
bline = nc_in.bline
dline = nc_in.dline
bsamp = nc_in.bsamp
dsamp = nc_in.dsamp
geo_units = distance_units
coord_sys_ID = coord_sys_ID
# iterate over each item in VARNAME_BY_FILETYPE for the filetype, creating
# a "Band" for each and corresponding entry in the poorly named
# header_dict
varnames = VARNAME_BY_FILETYPE[ipw.file_type]
header_dict = dict(zip(varnames,
[Band() for i in range(len(varnames) + 1)]))
# create a dataframe with nrows = nlines*nsamps and variable colnames
df_shape = (ipw.nlines*ipw.nsamps, len(varnames))
df = DataFrame(zeros(df_shape), columns=varnames)
for idx, var in enumerate(varnames):
header_dict[var] = Band(varname=var, band_idx=idx, nBytes=bytes_,
nBits=bits_, int_max=NC_MAXINT, bline=bline, dline=dline,
bsamp=bsamp, dsamp=dsamp, units=geo_units,
coord_sys_ID=coord_sys_ID)
# insert data to each df column
if tstep is not None:
data = ravel(nc_vars[var][tstep])
else:
data = ravel(nc_vars[var])
df[var] = data
ipw._data_frame = df
ipw.nonglobal_bands = header_dict.values()
# include global band in header dictionary
header_dict.update({'global': globalBand})
ipw.geotransform = [bsamp - dsamp / 2.0,
dsamp,
0.0,
bline - dline / 2.0,
0.0,
dline]
ipw.bands = header_dict.values()
ipw.header_dict = header_dict
# recalculate headers
ipw.recalculate_header()
return ipw
def data_frame(self):
"""
Get the Pandas DataFrame representation of the IPW file
"""
if self._data_frame is None:
self._data_frame = \
_build_ipw_dataframe(self.nonglobal_bands,
self.binary_data)
return self._data_frame
def write(self, fileName):
"""
Write the IPW data to file
"""
last_line = "!<header> image -1 $Revision: 1.5 $"
with open(fileName, 'wb') as f:
header_lines = _bands_to_header_lines(self.header_dict)
for l in header_lines:
f.write(l + '\n')
f.write(last_line + '\n')
_write_floatdf_binstring_to_file(
self.nonglobal_bands, self._data_frame, f)
return None
def generate_standard_nc(base_dir, nc_out=None, inputs_dir='inputs',
dem_file='tl2p5_dem.ipw', mask_file='tl2p5mask.ipw',
init_file='init.ipw', ppt_desc_path='ppt_desc',
data_tstep=60,
output_frequency=1, dt='hours', year=2010, month=10,
day='01',hour='',event_emitter=None,**kwargs):
"""Use the utilities from netcdf.py to convert standard set of either input
or output files to a NetCDF4 file. A standard set of files means
for inputs:
- inputs/ dir with 5/6-band input files named like in.0000, in.0001
- ppt_desc file with time index of precip file and path to ppt file
- ppt_images_dist directory with the 4-band files from ppt_desc
- tl2p5mask.ipw and tl2p5_dem.ipw for mask and DEM images
- init.ipw 7-band initialization file
for outputs:
- an output/ directory with 9-band energy-mass (em) outputs and
snow outputs in time steps named like em.0000 and snow.0000
Arguments:
base_dir (str): base directory of the data
nc_out (str): path to write data to
Returns:
(netCDF4.Dataset) Representation of the data
"""
if 'outputs' in base_dir.split('/')[-1]:
ipw_type = 'outputs'
elif inputs_dir in listdir(base_dir):
ipw_type = 'inputs'
else:
raise IPWFileError("%s does not meet standards" % base_dir)
if ipw_type == 'inputs':
input_files = [osjoin(base_dir, inputs_dir, el) for el in
listdir(osjoin(base_dir, inputs_dir))]
ipw0 = IPW(input_files[0])
gt = ipw0.geotransform
gb = [x for x in ipw0.bands if type(x) is GlobalBand][0]
# in iSNOBAL speak, literally the number of steps, not number of
# time index entries
nsteps = len(input_files) - 1
template_args = dict(bline=gt[3], bsamp=gt[0], dline=gt[5],
dsamp=gt[1], nsamps=gb.nSamps, nlines=gb.nLines,
data_tstep=data_tstep, nsteps=nsteps,
output_frequency=output_frequency, dt=dt,
year=year, month=month, day=day, hour=hour)
# initialize the nc file
nc = ncgen_from_template('ipw_in_template.cdl', nc_out, clobber=True,
**template_args)
# first take care of non-precip files
with ProgressBar(maxval=len(input_files)) as progress:
for i, f in enumerate(input_files):
ipw = IPW(f)
tstep = int(basename(ipw.input_file).split('.')[-1])
_nc_insert_ipw(nc, ipw, tstep, gb.nLines, gb.nSamps)
progress.update(i)
kwargs['event_name'] = 'input_ipw_to_nc'
                kwargs['event_description'] = 'creating nc from ipw files'
kwargs['progress_value'] = format((float(i)/len(input_files)) * 100,'.2f')
if event_emitter:
event_emitter.emit('progress',**kwargs)
# dem, mask may not exist
dem_mask_init_list = []
try:
dem = IPW(osjoin(base_dir, dem_file), file_type='dem')
dem_mask_init_list.append(dem)
except:
warnings.warn("No dem file found in " + base_dir)
pass
try:
mask = IPW(osjoin(base_dir, mask_file), file_type='mask')
dem_mask_init_list.append(mask)
except:
warnings.warn("No mask file found in " + base_dir)
pass
init = IPW(osjoin(base_dir, init_file))
dem_mask_init_list.append(init)
for el in dem_mask_init_list:
_nc_insert_ipw(nc, el, None, gb.nLines, gb.nSamps)
# precipitation files
# read ppt_desc file and insert to nc with appropriate time step
# we do not explicitly set any value for zero-precip time steps
space_regex = re.compile('\s+')
ppt_pairs = [space_regex.split(ppt_line.strip()) # ppt_line.strip().split('\t')
for ppt_line in
open(osjoin(base_dir, ppt_desc_path), 'r').readlines()]
with ProgressBar(maxval=len(ppt_pairs)) as progress:
for i, ppt_pair in enumerate(ppt_pairs):
tstep = int(ppt_pair[0])
el = IPW(ppt_pair[1], file_type='precip')
_nc_insert_ipw(nc, el, tstep, gb.nLines, gb.nSamps)
progress.update(i)
kwargs['event_name'] = 'input_ipw_to_nc2'
                kwargs['event_description'] = 'creating nc from ipw files 2'
kwargs['progress_value'] = format((float(i)/len(ppt_pairs)) * 100,'.2f')
if event_emitter:
event_emitter.emit('progress',**kwargs)
kwargs['event_name'] = 'input_ipw_to_nc2'
        kwargs['event_description'] = 'creating nc from ipw files 2'
kwargs['progress_value'] = 100
if event_emitter:
event_emitter.emit('progress',**kwargs)
else:
output_files = [osjoin(base_dir, el) for el in listdir(base_dir)]
ipw0 = IPW(output_files[0])
gt = ipw0.geotransform
gb = [x for x in ipw0.bands if type(x) is GlobalBand][0]
nsteps = len(output_files)
template_args = dict(bline=gt[3], bsamp=gt[0], dline=gt[5],
dsamp=gt[1], nsamps=gb.nSamps, nlines=gb.nLines,
data_tstep=data_tstep, nsteps=nsteps,
output_frequency=output_frequency, dt=dt,
year=year, month=month, day=day)
# initialize nc file
nc = ncgen_from_template('ipw_out_template.cdl', nc_out, clobber=True,
**template_args)
logging.debug('creating output file')
with ProgressBar(maxval=len(output_files)) as progress:
for i, f in enumerate(output_files):
ipw = IPW(f)
tstep = int(basename(ipw.input_file).split('.')[-1])
_nc_insert_ipw(nc, ipw, tstep, gb.nLines, gb.nSamps)
progress.update(i)
kwargs['event_name'] = 'ouptut_ipw_to_nc'
kwargs['event_description'] = 'creating output netcdf file from output ipw files'
kwargs['progress_value'] = format((float(i)/len(output_files)) * 100,'.2f')
if event_emitter:
event_emitter.emit('progress',**kwargs)
kwargs['event_name'] = 'ouptut_ipw_to_nc'
        kwargs['event_description'] = 'creating output nc file from output ipw'
kwargs['progress_value'] = 100
if event_emitter:
event_emitter.emit('progress',**kwargs)
# whether inputs or outputs, we need to include the dimensional values
t = nc.variables['time']
t[:] = arange(len(t))
e = nc.variables['easting']
# eastings are "samples" in IPW
nsamps = len(e)
e[:] = array([nc.bsamp + nc.dsamp*i for i in range(nsamps)])
n = nc.variables['northing']
# northings are "lines" in IPW
nlines = len(n)
n[:] = array([nc.bline + nc.dline*i for i in range(nlines)])
# get a n_points x 2 array of lat/lon pairs at every point on the grid
latlon_arr = utm2latlon(nc.bsamp, nc.bline, nc.dsamp,
nc.dline, nsamps, nlines)
# break this out into lat and lon separately at each point on the grid
lat = nc.variables['lat']
lat[:] = reshape(latlon_arr[:, 0], (nlines, nsamps))
# break this out into lat and lon separately at each point on the grid
lon = nc.variables['lon']
lon[:] = reshape(latlon_arr[:, 1], (nlines, nsamps))
# finish setting attributes
nc.data_tstep = data_tstep
nc.nsteps = len(t)
nc.sync()
return nc
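def _example_generate_standard_nc():
    """Illustrative sketch only: build a NetCDF from a standard IPW layout.
    'data' and 'inputs.nc' are hypothetical paths; 'data' is assumed to
    follow the standard input layout described above (inputs/, ppt_desc,
    ppt_images_dist/, DEM, mask and init images).
    """
    nc = generate_standard_nc('data', 'inputs.nc', data_tstep=60,
                              output_frequency=1, dt='hours',
                              year=2010, month=10, day='01')
    return nc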
def _nc_insert_ipw(dataset, ipw, tstep, nlines, nsamps):
"""Put IPW data into dataset based on file naming conventions
Args:
dataset (NetCDF4.Dataset): Dataset to be populated
ipw (wcwave_adaptors.isnobal.IPW): source data in IPW format
tstep (int): Positive integer indicating the current time step
nlines (int): number of 'lines' in IPW file, aka n_northings
nsamps (int): number of 'samps' in IPW file, aka n_eastings
Returns:
None. `dataset` is populated in-place.
"""
file_type = ipw.file_type
df = ipw.data_frame()
variables = dataset.variables
if file_type == 'dem':
# dem only has 'alt' information, stored in root group
dataset.variables['alt'][:, :] = reshape(df['alt'],
(nlines, nsamps))
elif file_type == 'in':
for var in VARNAME_BY_FILETYPE['in']:
# can't just assign b/c if sun is 'down' var is absent from df
if var in df.columns:
variables[var][tstep, :, :] = reshape(df[var],
(nlines, nsamps))
else:
variables[var][tstep, :, :] = zeros((nlines, nsamps))
elif file_type == 'precip':
for var in VARNAME_BY_FILETYPE['precip']:
variables[var][tstep, :, :] = reshape(df[var], (nlines, nsamps))
elif file_type == 'mask':
# mask is binary and one-banded; store in root group
dataset.variables['mask'][:, :] = reshape(df['mask'],
(nlines, nsamps))
elif file_type == 'init':
for var in VARNAME_BY_FILETYPE['init']:
variables[var][:, :] = reshape(df[var], (nlines, nsamps))
elif file_type == 'em':
for var in VARNAME_BY_FILETYPE['em']:
variables[var][tstep, :, :] = reshape(df[var], (nlines, nsamps))
elif file_type == 'snow':
for var in VARNAME_BY_FILETYPE['snow']:
variables[var][tstep, :, :] = reshape(df[var], (nlines, nsamps))
# TODO file_type == "em" and "snow" for outputs
else:
raise Exception('File type %s not recognized!' % file_type)
def nc_to_standard_ipw(nc_in, ipw_base_dir, clobber=True, type_='inputs',
event_emitter=None, **kwargs):
"""Convert an iSNOBAL NetCDF file to an iSNOBAL standard directory structure
in IPW format. This means that for
    isnobal input nc: all inputs are in {ipw_base_dir}/inputs and all precip
files are in {ipw_base_dir}/ppt_images_dist. There is a precip
description file {ipw_base_dir}/ppt_desc describing what time index
        each precipitation file corresponds to and the path to the precip
file in ppt_images_dist. There are three more files, the mask, init,
and DEM files at {ipw_base_dir}/ tl2p5mask.ipw, tl2p5_dem.ipw, and
init.ipw
isnobal output nc: files get output to {ipw_base_dir}/outputs to allow for
building a directory of both inputs and outputs. Files are like
em.0000 and snow.0000 for energy-mass and snow outputs, respectively.
Arguments:
nc_in (str) path to input NetCDF file to break out
ipw_base_dir (str) location to store
Returns:
None
"""
if type(nc_in) is str:
nc_in = Dataset(nc_in, 'r')
else:
assert isinstance(nc_in, Dataset)
present_vars = set(nc_in.variables.keys())
expected_vars = set([
u'time', u'easting', u'northing', u'lat', u'lon',
u'alt', u'mask', 'I_lw', 'T_a', 'e_a', 'u', 'T_g', 'S_n',
'm_pp', 'percent_snow', 'rho_snow', 'T_pp',
'z', 'z_0', 'z_s', 'rho', 'T_s_0', 'T_s', 'h2o_sat']
)
assert not expected_vars.difference(present_vars), \
"%s not a valid input iSNOBAL NetCDF; %s are missing" \
% (nc_in.filepath(), expected_vars.difference(present_vars))
if clobber and exists(ipw_base_dir):
rmtree(ipw_base_dir)
elif exists(ipw_base_dir):
raise IPWFileError("clobber=False and %s exists" % ipw_base_dir)
mkdir(ipw_base_dir)
time_index = range(len(nc_in.variables['time']))
if type_ == 'inputs':
# for each time step create an IPW file
inputs_dir = osjoin(ipw_base_dir, 'inputs')
mkdir(inputs_dir)
tsteps = len(time_index)
        zeropad_factor = int(floor(log10(tsteps)))
file_type = 'in'
logging.debug('creating input ipw files for each timestep from the input netcdf file (stage 1)')
with ProgressBar(maxval=time_index[-1]) as progress:
if len(time_index) > 1:
for i, idx in enumerate(time_index):
if idx < 10:
idxstr = "0"*zeropad_factor + str(idx)
elif idx < 100:
idxstr = "0"*(zeropad_factor - 1) + str(idx)
elif idx < 1000:
idxstr = "0"*(zeropad_factor - 2) + str(idx)
else:
idxstr = str(idx)
IPW.from_nc(nc_in, tstep=idx, file_type=file_type,
).write(osjoin(inputs_dir, 'in.' + idxstr))
progress.update(i)
kwargs['event_name'] = 'processing_input'
kwargs['event_description'] = 'creating input ipw files for ' \
'each timestep from the input netcdf file (stage 1)'
kwargs['progress_value'] = format(
(float(i)/time_index[-1]) * 100, '.2f')
if event_emitter:
event_emitter.emit('progress', **kwargs)
else:
IPW.from_nc(nc_in, tstep=time_index[0], file_type=file_type,
).write(osjoin(inputs_dir, 'in'))
                progress.update(0)
                kwargs['event_name'] = 'processing_input'
                kwargs['event_description'] = 'creating input ipw files for ' \
                    'each timestep from the input netcdf file (stage 1)'
                kwargs['progress_value'] = 100
                if event_emitter:
                    event_emitter.emit('progress', **kwargs)
kwargs['event_name'] = 'processing_input'
kwargs['event_description'] = \
                'creating input ipw for each timestep from nc'
kwargs['progress_value'] = 100
if event_emitter:
event_emitter.emit('progress', **kwargs)
file_type = 'init'
IPW.from_nc(nc_in, file_type=file_type
).write(osjoin(ipw_base_dir, 'init.ipw'))
IPW.from_nc(nc_in, variable='alt'
).write(osjoin(ipw_base_dir, 'dem.ipw'))
IPW.from_nc(nc_in, variable='mask'
).write(osjoin(ipw_base_dir, 'mask.ipw'))
# precip is weird. for no precip tsteps, no IPW exists
# list of tsteps that had precip and associated
# files stored in ppt_desc
file_type = 'precip'
ppt_images_dir = osjoin(ipw_base_dir, 'ppt_images_dist')
mkdir(ppt_images_dir)
# can use just one variable (precip mass) to see which
mpp = nc_in.variables['m_pp'][:]
pctsnow = nc_in.variables['percent_snow'][:]
rhosnow = nc_in.variables['rho_snow'][:]
precip_temp = nc_in.variables['T_pp'][:]
# if no precip at a tstep, variable type is numpy.ma.core.MaskedArray
time_indexes = [i for i, el in enumerate(mpp)
if not (
(
(mpp[i].all() is masked) and
(pctsnow[i].all() is masked) and
(rhosnow[i].all() is masked) and
(precip_temp[i].all() is masked)
)
or
(
(mpp[i] > 1e6).all() and
(pctsnow[i] > 1e6).all() and
(rhosnow[i] > 1e6).all() and
(precip_temp[i] > 1e6).all()
)
)
]
# this should be mostly right except for ppt_desc and ppt data dir
with open(osjoin(ipw_base_dir, 'ppt_desc'), 'w') as ppt_desc:
logging.debug('creating input ipw files for each timestep from the input netcdf file (stage 2)')
with ProgressBar(maxval=len(time_indexes)) as progress:
for i, idx in enumerate(time_indexes):
ppt_desc.write("%s\t%s\n" % (idx,
osjoin(ppt_images_dir,
'ppt_' + str(idx) + '.ipw')))
ipw = IPW.from_nc(nc_in, tstep=idx, file_type=file_type)
ipw.write(osjoin(ppt_images_dir,
'ppt_' + str(idx) + '.ipw'))
progress.update(i)
kwargs['event_name'] = 'processing_input2'
kwargs['event_description'] = 'creating input ipw files for each timestep from the input netcdf file (stage 2)'
kwargs['progress_value'] = format((float(i)/len(time_indexes)) * 100, '.2f')
if event_emitter:
event_emitter.emit('progress',**kwargs)
kwargs['event_name'] = 'processing_input2'
        kwargs['event_description'] = 'creating input ipw for each timestep from nc 2'
kwargs['progress_value'] = 100
if event_emitter:
event_emitter.emit('progress',**kwargs)
else:
raise Exception("NetCDF to IPW converter not implemented for type %s" %
type_)
def metadata_from_ipw(ipw, output_file, parent_model_run_uuid, model_run_uuid,
description, model_set=None):
"""
    Create the metadata for the IPW object, even if it doesn't exist as
a file on disk.
WARNING: Does not check that output_file exists. Should be used when, e.g.,
a re-sampled IPW file or geotiff is being created and saved, and the
metadata also needs to be created and either saved or sent to the
    watershed.
Returns: None
"""
fgdc_metadata = make_fgdc_metadata(output_file,
ipw.config, model_run_uuid)
input_prefix = output_file.split('.')[0]
if model_set is None:
model_set = ("outputs", "inputs")[input_prefix == "in"]
return make_watershed_metadata(output_file,
ipw.config,
parent_model_run_uuid,
model_run_uuid,
model_set,
description,
ipw.model_vars,
fgdc_metadata,
ipw.start_datetime,
ipw.end_datetime)
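# Illustrative sketch only (not from the original source): metadata_from_ipw is
# meant to be called right after a derived IPW/GeoTIFF has been written, e.g.
#
#   xml = metadata_from_ipw(ipw, 'outputs/em.0000', parent_run_uuid, run_uuid,
#                           description='resampled energy-mass output')
#
# The file name and uuid variables above are made-up placeholders.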
def reaggregate_ipws(ipws, fun=npsum, freq='H', rule='D'):
"""
Resample IPWs using the function fun, but only sum is supported.
`freq` corresponds to the actual frequency of the ipws; rule corresponds to
one of the resampling 'rules' given here:
http://pandas.pydata.org/pandas-docs/dev/timeseries.html#time-date-components
"""
assert fun is npsum, "Cannot use " + fun.func_name + \
", only sum has been implemented"
assert _is_consecutive(ipws)
ipw0 = ipws[0]
start_datetime = ipw0.start_datetime
idx = date_range(start=start_datetime, periods=len(ipws), freq=freq)
series = Series(map(lambda ipw: ipw.data_frame(), ipws), index=idx)
resampled = series.resample(rule, how=npsum)
resampled_idx = resampled.index
resampled_dt = resampled_idx[1] - resampled_idx[0]
resampled_ipws = [IPW() for el in resampled]
header_dict = deepcopy(ipw0.header_dict)
file_type = ipw0.file_type
# bands = deepcopy(ipw0.bands)
bands = ipw0.bands
# nonglobal_bands = deepcopy(ipw0.nonglobal_bands)
nonglobal_bands = ipw0.nonglobal_bands
geotransform = ipw0.geotransform
for ipw_idx, ipw in enumerate(resampled_ipws):
ipw._data_frame = resampled[ipw_idx]
ipw.start_datetime = resampled_idx[ipw_idx]
ipw.end_datetime = resampled_idx[ipw_idx] + resampled_dt
ipw.header_dict = deepcopy(header_dict)
ipw.file_type = file_type
ipw.bands = deepcopy(bands)
ipw.nonglobal_bands = deepcopy(nonglobal_bands)
ipw.geotransform = geotransform
ipw.recalculate_header()
return resampled_ipws
def _is_consecutive(ipws):
"""
Check that a list of ipws is consecutive
"""
ret = True
ipw_prev = ipws[0]
for ipw in ipws[1:]:
ret &= ipw_prev.end_datetime == ipw.start_datetime
ipw_prev = ipw
return ret
def _build_ipw_dataframe(nonglobal_bands, binary_data):
"""
Build a pandas DataFrame using header info to assign column names
"""
colnames = [b.varname for b in nonglobal_bands]
dtype = _bands_to_dtype(nonglobal_bands)
intData = fromstring(binary_data, dtype=dtype)
df = DataFrame(intData, columns=colnames)
for b in nonglobal_bands:
df[b.varname] = _calc_float_value(b, df[b.varname])
return df
def _make_bands(header_lines, varnames):
"""
Make a header dictionary that points to Band objects for each variable
name.
Returns: dict
"""
globalEndIdx = 0
# parse global information from global header
for i, l in enumerate(header_lines[1:-1]):
if IsHeaderStart(l):
globalEndIdx = i
break
global_header_lines = header_lines[1:globalEndIdx+1]
# tried a prettier dictionary comprehension, but wouldn't fly
global_band_dict = defaultdict(int)
for l in global_header_lines:
if l:
spl = l.strip().split()
if spl[0] == 'byteorder':
global_band_dict[spl[0]] = spl[2]
else:
global_band_dict[spl[0]] = int(spl[2])
# these are the standard names in an ISNOBAL header file
byteorder = global_band_dict['byteorder']
nLines = global_band_dict['nlines']
nSamps = global_band_dict['nsamps']
nBands = global_band_dict['nbands']
# this will be put into the return dictionary at the return statement
globalBand = GlobalBand(byteorder, nLines, nSamps, nBands)
# initialize a list of bands to put parsed information into
bands = [Band() for i in range(nBands)]
for i, b in enumerate(bands):
b.varname = varnames[i]
b.band_idx = i
band_type = None
band_idx = None
geo_parsed = False
ref_band = Band()
geo_count = 0
for line in header_lines[globalEndIdx:]:
spl = line.strip().split()
attr = spl[0]
if IsHeaderStart(line):
band_type = spl[BAND_TYPE_LOC]
band_idx = int(spl[BAND_INDEX_LOC])
lqCounter = 0
if band_type == 'geo':
geo_count += 1
if geo_count == 2:
geo_parsed = True
elif band_type == 'basic_image':
# assign byte and bits info that's stored here
if attr in ['bits', 'bytes']:
setattr(bands[band_idx], attr + "_", int(spl[2]))
elif band_type == 'lq':
# assign integer and float min and max. ignore non-"map" fields
if attr == "map":
# minimum values are listed first by IPW
if lqCounter == 0:
bands[band_idx].int_min = float(spl[2])
bands[band_idx].float_min = float(spl[3])
lqCounter += 1
elif lqCounter == 1:
bands[band_idx].int_max = float(spl[2])
bands[band_idx].float_max = float(spl[3])
elif band_type == 'geo':
# Not all bands have geo information. The ones that do are
# expected to be redundant. Check that all available are equal
# and for any that don't have geo information, set them to the
# geo information
if not geo_parsed:
if attr in ["bline", "bsamp", "dline", "dsamp"]:
setattr(ref_band, attr, float(spl[2]))
# setattr(bands[band_idx], attr, float(spl[2]))
elif attr in ["units", "coord_sys_ID"]:
if attr == "units":
attr = "geo_units"
setattr(ref_band, attr, spl[2])
# setattr(bands[band_idx], attr, spl[2])
else:
raise Exception(
"'geo' attribute %s from IPW file not recognized!" %
attr)
else:
if attr == "units":
attr = "geo_units"
assert\
getattr(ref_band, attr) == getattr(bands[band_idx], attr)
# now set all bands to the reference band
for band in bands:
band.bline = ref_band.bline
band.bsamp = ref_band.bsamp
band.dline = ref_band.dline
band.dsamp = ref_band.dsamp
band.geo_units = ref_band.geo_units
band.coord_sys_ID = ref_band.coord_sys_ID
return dict(zip(['global']+varnames[:nBands], [globalBand]+bands))
def _calc_float_value(band, integerValue):
"""
    Calculate a floating point value for integerValue given the min/max int
    and min/max floats in the given band
    Returns: Floating point value of the mapped integer
"""
floatRange = band.float_max - band.float_min
return integerValue * (floatRange / band.int_max) + band.float_min
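# Worked example of the linear quantization above (illustrative numbers only):
# with int_max=255, float_min=0.0 and float_max=10.0, an integer value of 51
# maps to 51 * (10.0 / 255) + 0.0 = 2.0.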
def _bands_to_dtype(bands):
"""
Given a list of Bands, convert them to a numpy.dtype for use in creating
the IPW dataframe.
"""
return dtype([(b.varname, 'uint' + str(b.bytes_ * 8)) for b in bands])
def _bands_to_header_lines(bands_dict):
"""
Convert the bands to a new header assuming the float ranges are up to date
for the current dataframe, df.
"""
firstLine = "!<header> basic_image_i -1 $Revision: 1.11 $"
global_ = bands_dict['global']
firstLines = [firstLine,
"byteorder = {0} ".format(global_.byteorder),
"nlines = {0} ".format(global_.nLines),
"nsamps = {0} ".format(global_.nSamps),
"nbands = {0} ".format(global_.nBands)]
other_lines = []
bands = [b for varname, b in bands_dict.iteritems() if varname != 'global']
bands = sorted(bands, key=lambda b: b.band_idx)
# for some reason IPW has a space at the end of data lines
for i, b in enumerate(bands):
other_lines += ["!<header> basic_image {0} $Revision: 1.11 $".format(i),
"bytes = {0} ".format(b.bytes_),
"bits = {0} ".format(b.bits_)]
# build the linear quantization (lq) headers
for i, b in enumerate(bands):
int_min = int(b.int_min)
int_max = int(b.int_max)
        # IPW writes integer floats without a decimal point, so remove if necessary
float_min = \
(b.float_min, int(b.float_min))[b.float_min == int(b.float_min)]
float_max = \
(b.float_max, int(b.float_max))[b.float_max == int(b.float_max)]
other_lines += ["!<header> lq {0} $Revision: 1.6 $".format(i),
"map = {0} {1} ".format(int_min, float_min),
"map = {0} {1} ".format(int_max, float_max)]
# import ipdb; ipdb.set_trace()
# build the geographic header
for i, b in enumerate(bands):
bline = b.bline
bsamp = b.bsamp
dline = b.dline
dsamp = b.dsamp
units = b.geo_units
coord_sys_ID = b.coord_sys_ID
other_lines += ["!<header> geo {0} $Revision: 1.7 $".format(i),
"bline = {0} ".format(bline),
"bsamp = {0} ".format(bsamp),
"dline = {0} ".format(dline),
"dsamp = {0} ".format(dsamp),
"units = {0} ".format(units),
"coord_sys_ID = {0} ".format(coord_sys_ID)]
return firstLines + other_lines
def _write_floatdf_binstring_to_file(bands, df, write_file):
"""
Convert the dataframe floating point data to a binary string.
Arguments:
bands: list of Band objects
df: dataframe to be written
write_file: File object ready for writing to
"""
# first convert df to an integer dataframe
int_df = DataFrame(dtype='uint64')
for b in sorted(bands, key=lambda b: b.band_idx):
# check that bands are appropriately made, that b.Max/Min really are
assert df[b.varname].le(b.float_max).all(), \
"Bad band: max not really max.\nb.float_max = %2.10f\n \
df[b.varname].max() = %s" % (b.float_max, df[b.varname].max())
assert df[b.varname].ge(b.float_min).all(), \
"Bad band: min not really min.\nb.float_min = %s\n \
df[b.varname].min() = %2.10f" % (b.float_min, df[b.varname].min())
def _map_fn(x):
if b.float_max - b.float_min == 0.0:
return 0.0
else:
return floor(npround(
((x - b.float_min) * b.int_max)/(b.float_max - b.float_min)
))
int_df[b.varname] = _map_fn(df[b.varname])
# use the struct package to pack ints to bytes; use '=' to prevent padding
# that causes problems with the IPW scheme
# pack_str = "=" + "".join([PACK_DICT[b.bytes_] for b in bands])
int_mat = int_df.as_matrix()
pack_str = "=" + "".join([PACK_DICT[b.bytes_] for b in bands])*len(int_mat)
# for row_idx in range(len(int_mat)):
flat_mat = int_mat.flatten()
write_file.write(pack(pack_str, *flat_mat))
def _recalculate_header(bands, dataframe):
"""
Recalculate the minimum and maximum of each band in bands given a dataframe
that contains data for each band.
Returns: None
"""
assert set(list(dataframe.columns)) == set([b.varname for b in bands]), \
"DataFrame column names do not match bands' variable names!"
for band in bands:
band.float_min = dataframe[band.varname].min()
band.float_max = dataframe[band.varname].max()
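        # guard against a constant band: widen a zero-width float range so the
        # integer <-> float mapping used when writing headers stays well defined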
if band.float_min == band.float_max:
band.float_max = band.float_min + 1.0
return None
class Band(object):
"""
Container for band information
"""
def __init__(self, varname="", band_idx=0, nBytes=0, nBits=0, int_min=0.0,
int_max=0.0, float_min=0.0, float_max=0.0,
bline=0.0, bsamp=0.0, dline=0.0, dsamp=0.0,
units="meters", coord_sys_ID="UTM"):
"""
Can either pass this information or create an all-None Band.
"""
self.varname = varname
self.band_idx = band_idx
self.bytes_ = nBytes
self.bits_ = nBits
self.int_min = float(int_min)
self.int_max = float(int_max)
self.float_min = float(float_min)
self.float_max = float(float_max)
self.bline = float(bline)
self.bsamp = float(bsamp)
self.dline = float(dline)
self.dsamp = float(dsamp)
assert type(units) is str
self.geo_units = units
assert type(coord_sys_ID) is str
self.coord_sys_ID = coord_sys_ID
def __str__(self):
return "-- " + self.varname + " --\n" +\
"".join([attr + ": " + str(value) + "\n"
for attr, value in
self.__dict__.iteritems()])
class IPWLines(object):
"""
Data structure to wrap header and binary parts of an IPW file.
Arguments: ipwFile -- file name pointing to an IPW file
"""
def __init__(self, ipw_file):
with open(ipw_file, 'rb') as f:
lines = f.readlines()
last_header_idx = \
            [(i, l) for i, l in enumerate(lines) if "\f" in l][0][0]
split_idx = last_header_idx + 1
self.header_lines = lines[:split_idx]
self.binary_data = "".join(lines[split_idx:])
class IPWFileError(Exception):
pass
class ISNOBALNetcdfError(Exception):
pass
| bsd-2-clause |
calebfoss/tensorflow | tensorflow/examples/learn/hdf5_classification.py | 60 | 2190 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, h5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
learn = tf.contrib.learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we are saving and loading iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
smblance/ggplot | ggplot/tests/test_colors.py | 12 | 5064 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from nose.tools import assert_true
from ggplot import *
from ggplot.components.legend import get_labels
from ggplot.components.colors import assign_continuous_colors, \
assign_discrete_colors
from ggplot.tests import cleanup
@cleanup
def test_assign_colors():
"""
Test how colors are assigned to different column types.
"""
df = pd.DataFrame({"values": np.arange(10),
"int_col": np.arange(10),
"num_col": np.arange(10) / 2,
"bool_col": np.random.randn(10) > 0,
"char_col": ["a", "b"] * 5})
color_mapping_col = ':::color_mapping:::'
fill_mapping_col = ':::fill_mapping:::'
# test integer column
color_col = "int_col"
gg_int = ggplot(df, aes(x="values", y="values", color="int_col"))
gg_int += geom_point()
gg_int.draw()
labels, scale_type, indices = get_labels(df, color_col)
new_data, _ = assign_continuous_colors(df, gg_int, 'color',
color_col, labels, indices)
expected_cols = new_data[color_mapping_col]
actual_cols = gg_int.data[color_mapping_col]
assert_true((actual_cols == expected_cols).all())
# test numeric column
color_col = "num_col"
gg_num = ggplot(df, aes(x="values", y="values", color="num_col"))
gg_num += geom_point()
gg_num.draw()
labels, scale_type, indices = get_labels(df, color_col)
new_data, _ = assign_continuous_colors(df, gg_num, 'color',
color_col, labels, indices)
expected_cols = new_data[color_mapping_col]
actual_cols = gg_num.data[color_mapping_col]
assert_true((actual_cols == expected_cols).all())
# test bool column
color_col = "bool_col"
gg_bool = ggplot(df, aes(x="values", y="values", color="bool_col"))
gg_bool += geom_point()
gg_bool.draw()
labels, scale_type, indices = get_labels(df, color_col)
new_data, _ = assign_discrete_colors(df, gg_bool, 'color',
color_col, labels)
expected_cols = new_data[color_mapping_col]
actual_cols = gg_bool.data[color_mapping_col]
assert_true((actual_cols == expected_cols).all())
# test char column
color_col = "char_col"
gg_char = ggplot(df, aes(x="values", y="values", color="char_col"))
gg_char += geom_point()
gg_char.draw()
labels, scale_type, indices = get_labels(df, color_col)
new_data, _ = assign_discrete_colors(df, gg_char, 'color',
color_col, labels)
expected_cols = new_data[color_mapping_col]
actual_cols = gg_char.data[color_mapping_col]
assert_true((actual_cols == expected_cols).all())
# Fill mapping
# test integer column
fill_col = "int_col"
gg_int = ggplot(df, aes(x="values", y="values", fill="int_col"))
gg_int += geom_point()
gg_int.draw()
labels, scale_type, indices = get_labels(df, fill_col)
new_data, _ = assign_continuous_colors(df, gg_int, 'fill',
fill_col, labels, indices)
expected_cols = new_data[fill_mapping_col]
actual_cols = gg_int.data[fill_mapping_col]
assert_true((actual_cols == expected_cols).all())
# test numeric column
fill_col = "num_col"
gg_num = ggplot(df, aes(x="values", y="values", fill="num_col"))
gg_num += geom_point()
gg_num.draw()
labels, scale_type, indices = get_labels(df, fill_col)
new_data, _ = assign_continuous_colors(df, gg_num, 'fill',
fill_col, labels, indices)
expected_cols = new_data[fill_mapping_col]
actual_cols = gg_num.data[fill_mapping_col]
assert_true((actual_cols == expected_cols).all())
# test bool column
fill_col = "bool_col"
gg_bool = ggplot(df, aes(x="values", y="values", fill="bool_col"))
gg_bool += geom_point()
gg_bool.draw()
labels, scale_type, indices = get_labels(df, fill_col)
new_data, _ = assign_discrete_colors(df, gg_bool, 'fill',
fill_col, labels)
expected_cols = new_data[fill_mapping_col]
actual_cols = gg_bool.data[fill_mapping_col]
assert_true((actual_cols == expected_cols).all())
# test char column
fill_col = "char_col"
gg_char = ggplot(df, aes(x="values", y="values", fill="char_col"))
gg_char += geom_point()
gg_char.draw()
labels, scale_type, indices = get_labels(df, fill_col)
new_data, _ = assign_discrete_colors(df, gg_char, 'fill',
fill_col, labels)
expected_cols = new_data[fill_mapping_col]
actual_cols = gg_char.data[fill_mapping_col]
assert_true((actual_cols == expected_cols).all())
| bsd-2-clause |
parekhmitchell/Machine-Learning | Machine Learning A-Z Template Folder/Part 3 - Classification/Section 17 - Kernel SVM/kernel_svm.py | 6 | 2607 | # Kernel SVM
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting Kernel SVM to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, y_train)
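# Possible extension, not part of the original template: C and gamma of the rbf
# kernel could be tuned with a grid search (the module name depends on the
# scikit-learn version, sklearn.grid_search vs sklearn.model_selection), e.g.
# from sklearn.grid_search import GridSearchCV
# grid = GridSearchCV(SVC(kernel = 'rbf', random_state = 0),
#                     {'C': [0.1, 1, 10], 'gamma': [0.01, 0.1, 1]}, cv = 5)
# grid.fit(X_train, y_train)
# classifier = grid.best_estimator_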
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernel SVM (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernel SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show() | mit |
mne-tools/mne-tools.github.io | stable/_downloads/5fbe119103806572ba1bc111d82a654a/define_target_events.py | 29 | 3376 | """
============================================================
Define target events based on time lag, plot evoked response
============================================================
This script shows how to define higher order events based on
time lag between reference and target events. For
illustration, we will put face stimuli presented into two
classes, that is 1) followed by an early button press
(within 590 milliseconds) and 2) followed by a late button
press (later than 590 milliseconds). Finally, we will
visualize the evoked responses to both 'quickly-processed'
and 'slowly-processed' face stimuli.
"""
# Authors: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.event import define_target_events
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
raw.info['bads'] += ['EEG 053'] # bads
# pick MEG channels
picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=True,
include=include, exclude='bads')
###############################################################################
# Find stimulus event followed by quick button presses
reference_id = 5 # presentation of a smiley face
target_id = 32 # button press
sfreq = raw.info['sfreq'] # sampling rate
tmin = 0.1 # trials leading to very early responses will be rejected
tmax = 0.59 # ignore face stimuli followed by button press later than 590 ms
new_id = 42 # the new event id for a hit. If None, reference_id is used.
fill_na = 99 # the fill value for misses
events_, lag = define_target_events(events, reference_id, target_id,
sfreq, tmin, tmax, new_id, fill_na)
print(events_) # The 99 indicates missing or too late button presses
# besides the events also the lag between target and reference is returned
# this could e.g. be used as parametric regressor in subsequent analyses.
print(lag[lag != fill_na]) # lag in milliseconds
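# Each row of events_ follows the usual MNE layout [sample, previous stim value,
# event id]; rows whose id equals fill_na (99) are reference events with no
# button press inside the tmin-tmax window.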
# #############################################################################
# Construct epochs
tmin_ = -0.2
tmax_ = 0.4
event_id = dict(early=new_id, late=fill_na)
epochs = mne.Epochs(raw, events_, event_id, tmin_,
tmax_, picks=picks, baseline=(None, 0),
reject=dict(mag=4e-12))
# average epochs and get an Evoked dataset.
early, late = [epochs[k].average() for k in event_id]
###############################################################################
# View evoked response
times = 1e3 * epochs.times # time in milliseconds
title = 'Evoked response followed by %s button press'
fig, axes = plt.subplots(2, 1)
early.plot(axes=axes[0], time_unit='s')
axes[0].set(title=title % 'late', ylabel='Evoked field (fT)')
late.plot(axes=axes[1], time_unit='s')
axes[1].set(title=title % 'early', ylabel='Evoked field (fT)')
plt.show()
| bsd-3-clause |
cbertinato/pandas | asv_bench/benchmarks/io/sql.py | 1 | 5390 | import sqlite3
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, date_range, read_sql_query, read_sql_table
from sqlalchemy import create_engine
class SQL:
params = ['sqlalchemy', 'sqlite']
param_names = ['connection']
def setup(self, connection):
N = 10000
con = {'sqlalchemy': create_engine('sqlite:///:memory:'),
'sqlite': sqlite3.connect(':memory:')}
self.table_name = 'test_type'
self.query_all = 'SELECT * FROM {}'.format(self.table_name)
self.con = con[connection]
self.df = DataFrame({'float': np.random.randn(N),
'float_with_nan': np.random.randn(N),
'string': ['foo'] * N,
'bool': [True] * N,
'int': np.random.randint(0, N, size=N),
'datetime': date_range('2000-01-01',
periods=N,
freq='s')},
index=tm.makeStringIndex(N))
self.df.loc[1000:3000, 'float_with_nan'] = np.nan
self.df['datetime_string'] = self.df['datetime'].astype(str)
self.df.to_sql(self.table_name, self.con, if_exists='replace')
def time_to_sql_dataframe(self, connection):
self.df.to_sql('test1', self.con, if_exists='replace')
def time_read_sql_query(self, connection):
read_sql_query(self.query_all, self.con)
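# These classes follow the airspeed velocity (asv) benchmark convention: setup()
# prepares the data and every time_* method is timed automatically. They would
# typically be run with something like `asv run --bench io.sql` from the
# asv_bench directory (exact flags depend on the local asv configuration).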
class WriteSQLDtypes:
params = (['sqlalchemy', 'sqlite'],
['float', 'float_with_nan', 'string', 'bool', 'int', 'datetime'])
param_names = ['connection', 'dtype']
def setup(self, connection, dtype):
N = 10000
con = {'sqlalchemy': create_engine('sqlite:///:memory:'),
'sqlite': sqlite3.connect(':memory:')}
self.table_name = 'test_type'
self.query_col = 'SELECT {} FROM {}'.format(dtype, self.table_name)
self.con = con[connection]
self.df = DataFrame({'float': np.random.randn(N),
'float_with_nan': np.random.randn(N),
'string': ['foo'] * N,
'bool': [True] * N,
'int': np.random.randint(0, N, size=N),
'datetime': date_range('2000-01-01',
periods=N,
freq='s')},
index=tm.makeStringIndex(N))
self.df.loc[1000:3000, 'float_with_nan'] = np.nan
self.df['datetime_string'] = self.df['datetime'].astype(str)
self.df.to_sql(self.table_name, self.con, if_exists='replace')
def time_to_sql_dataframe_column(self, connection, dtype):
self.df[[dtype]].to_sql('test1', self.con, if_exists='replace')
def time_read_sql_query_select_column(self, connection, dtype):
read_sql_query(self.query_col, self.con)
class ReadSQLTable:
def setup(self):
N = 10000
self.table_name = 'test'
self.con = create_engine('sqlite:///:memory:')
self.df = DataFrame({'float': np.random.randn(N),
'float_with_nan': np.random.randn(N),
'string': ['foo'] * N,
'bool': [True] * N,
'int': np.random.randint(0, N, size=N),
'datetime': date_range('2000-01-01',
periods=N,
freq='s')},
index=tm.makeStringIndex(N))
self.df.loc[1000:3000, 'float_with_nan'] = np.nan
self.df['datetime_string'] = self.df['datetime'].astype(str)
self.df.to_sql(self.table_name, self.con, if_exists='replace')
def time_read_sql_table_all(self):
read_sql_table(self.table_name, self.con)
def time_read_sql_table_parse_dates(self):
read_sql_table(self.table_name, self.con, columns=['datetime_string'],
parse_dates=['datetime_string'])
class ReadSQLTableDtypes:
params = ['float', 'float_with_nan', 'string', 'bool', 'int', 'datetime']
param_names = ['dtype']
def setup(self, dtype):
N = 10000
self.table_name = 'test'
self.con = create_engine('sqlite:///:memory:')
self.df = DataFrame({'float': np.random.randn(N),
'float_with_nan': np.random.randn(N),
'string': ['foo'] * N,
'bool': [True] * N,
'int': np.random.randint(0, N, size=N),
'datetime': date_range('2000-01-01',
periods=N,
freq='s')},
index=tm.makeStringIndex(N))
self.df.loc[1000:3000, 'float_with_nan'] = np.nan
self.df['datetime_string'] = self.df['datetime'].astype(str)
self.df.to_sql(self.table_name, self.con, if_exists='replace')
def time_read_sql_table_column(self, dtype):
read_sql_table(self.table_name, self.con, columns=[dtype])
from ..pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
numenta/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_agg.py | 69 | 11729 | """
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* allow save to file handle
* integrate screen dpi w/ ppi and text
"""
from __future__ import division
import numpy as npy
from matplotlib import verbose, rcParams
from matplotlib.backend_bases import RendererBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, maxdict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont
from matplotlib.ft2font import FT2Font, LOAD_FORCE_AUTOHINT
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from _backend_agg import RendererAgg as _RendererAgg
from matplotlib import _png
backend_version = 'v2.2'
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
debug=1
texd = maxdict(50) # a cache of tex image rasters
_fontd = maxdict(50)
def __init__(self, width, height, dpi):
if __debug__: verbose.report('RendererAgg.__init__', 'debug-annoying')
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
if __debug__: verbose.report('RendererAgg.__init__ width=%s, height=%s'%(width, height), 'debug-annoying')
self._renderer = _RendererAgg(int(width), int(height), dpi, debug=False)
if __debug__: verbose.report('RendererAgg.__init__ _RendererAgg done',
'debug-annoying')
#self.draw_path = self._renderer.draw_path # see below
self.draw_markers = self._renderer.draw_markers
self.draw_path_collection = self._renderer.draw_path_collection
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.draw_image = self._renderer.draw_image
self.copy_from_bbox = self._renderer.copy_from_bbox
self.restore_region = self._renderer.restore_region
self.tostring_rgba_minimized = self._renderer.tostring_rgba_minimized
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
if __debug__: verbose.report('RendererAgg.__init__ done',
'debug-annoying')
def draw_path(self, gc, path, transform, rgbFace=None):
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
if nmax > 100 and npts > nmax and path.should_simplify and rgbFace is None:
nch = npy.ceil(npts/float(nmax))
chsize = int(npy.ceil(npts/nch))
i0 = npy.arange(0, npts, chsize)
i1 = npy.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
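            # i0/i1 hold the start/end vertex index of each chunk; each chunk
            # is drawn as its own Path below to bound agg memory use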
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1,:]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
self._renderer.draw_path(gc, p, transform, rgbFace)
else:
self._renderer.draw_path(gc, path, transform, rgbFace)
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if __debug__: verbose.report('RendererAgg.draw_mathtext',
'debug-annoying')
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
x = int(x) + ox
y = int(y) - oy
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the text
"""
if __debug__: verbose.report('RendererAgg.draw_text', 'debug-annoying')
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
font = self._get_agg_font(prop)
if font is None: return None
if len(s) == 1 and ord(s) > 127:
font.load_char(ord(s), flags=LOAD_FORCE_AUTOHINT)
else:
# We pass '0' for angle here, since it will be rotated (in raster
# space) in the following call to draw_text_image).
font.set_text(s, 0, flags=LOAD_FORCE_AUTOHINT)
font.draw_glyphs_to_bitmap()
#print x, y, int(x), int(y)
self._renderer.draw_text_image(font.get_image(), int(x), int(y) + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
        # passing rgb is a little hack to make caching in the
# texmanager more efficient. It is not meant to be used
# outside the backend
"""
if ismath=='TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: descent of TeX text (I am imitating backend_ps here -JKS)
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=LOAD_FORCE_AUTOHINT) # the width and height of unrotated string
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle):
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key)
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = npy.array(Z * 255.0, npy.uint8)
self._renderer.draw_text_image(Z, x, y, angle, gc)
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def _get_agg_font(self, prop):
"""
        Get the font for text instance t, caching for efficiency
"""
if __debug__: verbose.report('RendererAgg._get_agg_font',
'debug-annoying')
key = hash(prop)
font = self._fontd.get(key)
if font is None:
fname = findfont(prop)
font = self._fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self._fontd[fname] = font
self._fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
"""
        convert point measures to pixels using dpi and the pixels per
inch of the display
"""
if __debug__: verbose.report('RendererAgg.points_to_pixels',
'debug-annoying')
return points*self.dpi/72.0
def tostring_rgb(self):
if __debug__: verbose.report('RendererAgg.tostring_rgb',
'debug-annoying')
return self._renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('RendererAgg.tostring_argb',
'debug-annoying')
return self._renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('RendererAgg.buffer_rgba',
'debug-annoying')
return self._renderer.buffer_rgba(x,y)
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasAgg(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region):
renderer = self.get_renderer()
return renderer.restore_region(region)
def draw(self):
"""
Draw the figure using the renderer
"""
if __debug__: verbose.report('FigureCanvasAgg.draw', 'debug-annoying')
self.renderer = self.get_renderer()
self.figure.draw(self.renderer)
def get_renderer(self):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try: self._lastKey, self.renderer
except AttributeError: need_new_renderer = True
else: need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
return self.renderer
def tostring_rgb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_rgb',
'debug-annoying')
return self.renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_argb',
'debug-annoying')
return self.renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('FigureCanvasAgg.buffer_rgba',
'debug-annoying')
return self.renderer.buffer_rgba(x,y)
def get_default_filetype(self):
return 'png'
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
renderer._renderer.write_rgba(filename_or_obj)
renderer.dpi = original_dpi
print_rgba = print_raw
def print_png(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
_png.write_png(renderer._renderer.buffer_rgba(0, 0),
renderer.width, renderer.height,
filename_or_obj, self.figure.dpi)
renderer.dpi = original_dpi
| agpl-3.0 |
davemccormick/pyAnimalTrack | src/pyAnimalTrack/ui/Service/SaveDataframe.py | 1 | 1070 | # TODO: Saving to file with a particular separator.
# TODO: Saving to file, output folder the same as where we loaded from maybe?
from PyQt5.QtWidgets import QFileDialog
from pyAnimalTrack.ui.Model.SettingsModel import SettingsModel
class SaveDataframe:
@staticmethod
def save(data, format, save=True):
save_result = QFileDialog.getSaveFileName(filter=SettingsModel.get_value(format + '_SaveFormatsFilter'))
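        # in PyQt5, getSaveFileName returns a (filename, selected_filter) tuple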
filename = save_result[0]
# No filename, cancelled
if filename == '':
return
# If we don't have a extension, add one
if not SettingsModel.get_value(format + '_SaveFormats').__contains__(filename.split('.')[-1]):
filename += '.' + save_result[1].split('.')[-1]
# Save the pandas dataframe and alert the user
if not save:
return filename
elif format == 'data':
data.to_csv(filename)
return filename
elif format == 'graph':
data.savefig(filename)
return filename
return False | gpl-3.0 |
kylerbrown/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
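# modified_huber_loss above is piecewise in the margin z = y_pred * y_true:
# -4z for z < -1, (1 - z)^2 for -1 <= z < 1, and 0 for z >= 1.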
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
DistributedML/TorML | ML_experimental/code/global_model4.py | 1 | 4991 | from __future__ import division
import numpy as np
import logistic_model_test
from numpy.linalg import norm
import emcee
#import matplotlib.pyplot as pl
import matplotlib.pyplot as plt
class globalModel4:
# Logistic Regression
def __init__(self, logistic=False, verbose=1, maxEvals=400):
self.verbose = verbose
self.maxEvals = maxEvals
self.models = []
self.weights = np.empty(0)
self.logistic = logistic
def add_model(self, model):
self.models.append(model)
def sgd_fit_private(self, alpha, XBin, yBin, XBinValid, yBinValid, eta, batch_size=0,dataset='', *args):
#print ("Training model via private SGD.")
# Parameters of the Optimization
optTol = 1e-2
n, d = self.models[0].X.shape
collectionX = []
collectionY = []
#print(alpha)
for k in range(0,len(self.models)):
batchesX = []
batchesY = []
within = True
j=0
while within:
if (j+batch_size <= n):
batchesX.append(self.models[k].X[j:j+batch_size,:])
batchesY.append(self.models[k].y[j:j+batch_size])
j+=batch_size
else:
within = False
collectionX.append(batchesX)
collectionY.append(batchesY)
#---------------------------------------------------------------------------
#Generate random samples from isotropic multivariate laplace distribution using emcee
def lnprob(x,alpha):
return -(alpha/2)*np.linalg.norm(x)
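        # lnprob is the log-density, up to an additive constant, of an isotropic
        # multivariate Laplace-type distribution proportional to
        # exp(-(alpha/2) * ||x||_2); emcee only needs it up to normalization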
ndim = d
#ndim=10
nwalkers = max(4*d,250)
#print(nwalkers)
p0 = [np.random.rand(ndim) for i in range(nwalkers)]
#p0 = np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob,args=[alpha])
pos, prob, state = sampler.run_mcmc(p0,100)
sampler.reset()
#print(d)
#sampler.run_mcmc(pos, 1000)
sampler.run_mcmc(pos, 1000,rstate0=state)
#print("Mean acceptance fraction:", np.mean(sampler.acceptance_fraction))
#print("Autocorrelation time:", sampler.get_autocorr_time())
sample = sampler.flatchain
#---------------------------------------------------------------------------
fValues = []
iterations = []
# Initial guess
self.w = np.random.rand(d)
#self.w = np.zeros(d)
funEvals = 1
i=1
train_progress = []
test_progress = []
while True:
d1,d2 = sample.shape
z = np.random.randint(0,d1)
Z = sample[z]
l = np.random.randint(0,len(collectionX[0]))
Xbatch = collectionX[0][l]
ybatch = collectionY[0][l]
(delta, f_new, g) = self.models[0].privateFun2(eta,alpha,self.w, Z, Xbatch, ybatch, batch_size, *args)
#if i%1000 == 0:
iterations.append(i)
fValues.append(f_new)
i+=1
funEvals += 1
# Print progress
if self.verbose > 0:
print("%d - loss: %.3f" % (funEvals, f_new))
print("%d - g_norm: %.3f" % (funEvals, norm(g)))
# Update parameters
self.w = self.w + delta
train_progress.append(logistic_model_test.train_error(self.w, XBin, yBin))
test_progress.append(logistic_model_test.test_error(self.w, dataset, XBinValid, yBinValid))
# Test termination conditions
optCond = norm(g, float('inf'))
#print("alpha = ", alpha)
if optCond < optTol:
#print("1",f_new)
if self.verbose:
print("Problem solved up to optimality tolerance %.3f" % optTol)
break
if funEvals >= self.maxEvals:
#print("2",f_new)
if self.verbose:
print("Reached maximum number of function evaluations %d" %
self.maxEvals)
break
# -------------------------------------------------------------------------------------
#For plotting training and validation error against iterations
#s = 'alpha = ' + str(alpha)
s2 = 'dataset = ' + dataset + ' & alpha size = ' + str(alpha)
#fig = plt.figure()
plt.plot(train_progress,label="Training")
plt.plot(test_progress,label="Validation")
plt.ylabel('Training & Validation error')
plt.xlabel('Number of iterations')
plt.title(s2)
plt.legend()
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/base.py | 3 | 20250 | """Base classes for all estimators."""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import copy
import warnings
from collections import defaultdict
import numpy as np
from scipy import sparse
from .externals import six
from .utils.fixes import signature
from . import __version__
##############################################################################
def _first_and_last_element(arr):
"""Returns first and last element of numpy array or sparse matrix."""
if isinstance(arr, np.ndarray) or hasattr(arr, 'data'):
# numpy array or sparse matrix with .data attribute
data = arr.data if sparse.issparse(arr) else arr
return data.flat[0], data.flat[-1]
else:
# Sparse matrices without .data attribute. Only dok_matrix at
# the time of writing, in this case indexing is fast
return arr[0, 0], arr[-1, -1]
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator : estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe : boolean, optional
If safe is false, clone will fall back to a deep copy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is param2:
# this should always happen
continue
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
and (_first_and_last_element(param1) ==
_first_and_last_element(param2))
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and (_first_and_last_element(param1) ==
_first_and_last_element(param2))
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
# fall back on standard equality
equality_test = param1 == param2
if equality_test:
warnings.warn("Estimator %s modifies parameters in __init__."
" This behavior is deprecated as of 0.18 and "
"support for this behavior will be removed in 0.20."
% type(estimator).__name__, DeprecationWarning)
else:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
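# Illustrative use of clone (not part of this module's own examples): for any
# estimator instance `est`, clone(est) returns an unfitted copy built from the
# same hyper-parameters, e.g. fresh = clone(LogisticRegression(C=10)).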
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int
The offset in characters to add at the begin of each line.
printer : callable
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition('__')
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self))
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
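    # Example of the nested form described above (illustrative): for a Pipeline
    # `pipe` with a step named 'svc', pipe.set_params(svc__C=10) sets C on that
    # step.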
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
def __getstate__(self):
try:
state = super(BaseEstimator, self).__getstate__()
except AttributeError:
state = self.__dict__.copy()
if type(self).__module__.startswith('sklearn.'):
return dict(state.items(), _sklearn_version=__version__)
else:
return state
def __setstate__(self, state):
if type(self).__module__.startswith('sklearn.'):
pickle_version = state.pop("_sklearn_version", "pre-0.18")
if pickle_version != __version__:
warnings.warn(
"Trying to unpickle estimator {0} from version {1} when "
"using version {2}. This might lead to breaking code or "
"invalid results. Use at your own risk.".format(
self.__class__.__name__, pickle_version, __version__),
UserWarning)
try:
super(BaseEstimator, self).__setstate__(state)
except AttributeError:
self.__dict__.update(state)
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
        would get an R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='variance_weighted')
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Parameters
----------
i : int
The index of the cluster.
data : array
The data.
Returns
-------
submatrix : array
The submatrix corresponding to bicluster i.
Notes
-----
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
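# Illustrative sketch of the indicator convention used by this mixin (the
# boolean arrays below are made-up values, not real ``rows_``/``columns_``):
# >>> import numpy as np
# >>> rows = np.array([True, False, True]); cols = np.array([False, True])
# >>> np.nonzero(rows)[0], np.nonzero(cols)[0]              # like get_indices(i)
# (array([0, 2]), array([1]))
# >>> (len(np.nonzero(rows)[0]), len(np.nonzero(cols)[0]))  # like get_shape(i)
# (2, 1)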
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
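# Minimal sketch (illustrative; StandardScaler is assumed): for an
# unsupervised transformer, fit_transform(X) equals fit(X).transform(X).
# >>> import numpy as np
# >>> from sklearn.preprocessing import StandardScaler
# >>> StandardScaler().fit_transform(np.array([[0.0], [2.0]]))
# array([[-1.],
#        [ 1.]])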
class DensityMixin(object):
"""Mixin class for all density estimators in scikit-learn."""
_estimator_type = "DensityEstimator"
def score(self, X, y=None):
"""Returns the score of the model on the data X
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
score : float
"""
pass
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
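# Quick sketch of the two helpers above (illustrative; SVC/SVR are assumed
# to be importable):
# >>> from sklearn.svm import SVC, SVR
# >>> is_classifier(SVC()), is_regressor(SVR())
# (True, True)
# >>> is_classifier(SVR())
# False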
| mit |
beepee14/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
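# Sketch of the batching used above (illustrative): Xdigits has 1797 rows,
# so with batch_size=20, np.array_split produces ceil(1797/20) = 90 nearly
# equal chunks, each fed to partial_fit once per epoch.
# >>> import numpy as np
# >>> len(np.array_split(np.zeros((1797, 64)), 90))
# 90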
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# that many iterations are needed for the components to settle
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
RayMick/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
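# Worked sketch (illustrative): a timedelta of 2 s and 500000 us yields
# 2 + 500000 / 1e6 = 2.5 seconds; note that t_start is not used by the formula.
# >>> from datetime import timedelta
# >>> compute_time(None, timedelta(seconds=2, microseconds=500000))
# 2.5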
def bench_scikit_transformer(X, transfomer):
gc.collect()
clf = clone(transfomer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
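# Minimal sketch (the sizes below are assumptions): the dense array and the
# CSR matrix share the same shape and the same randomly placed entries.
# >>> dense, sparse = make_sparse_random_data(5, 10, 8, random_state=0)
# >>> dense.shape, sparse.shape
# ((5, 10), (5, 10))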
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are averaged over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Ratio of non-zero entries in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
deepesch/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
yask123/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
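# Minimal sketch (the path below is an assumption, not a fixed location):
# >>> get_data_home('/tmp/my_sklearn_data')        # doctest: +SKIP
# '/tmp/my_sklearn_data'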
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
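# Minimal usage sketch (the folder layout and category names below are
# assumptions):
# >>> bunch = load_files('/tmp/texts', encoding='utf-8')  # doctest: +SKIP
# >>> sorted(bunch.target_names)                          # doctest: +SKIP
# ['ham', 'spam']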
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
zhaoshuxue/document | Linux/TensorFlow/4.py | 1 | 2125 | import tensorflow as tf
import numpy
# When running a Python script on a Linux server you may need to plot, but the server has no GUI, so the following must be executed before importing matplotlib.pyplot
import matplotlib as mpl
mpl.use('Agg')
# then execute the import below
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50
# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Create Model
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
activation = tf.add(tf.multiply(X, W), b)
# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
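# Worked sketch of the loss above (illustrative numbers): with predictions
# [2.0, 3.0], targets [1.0, 5.0] and n_samples = 2,
# cost = ((2-1)**2 + (3-5)**2) / (2*2) = (1 + 4) / 4 = 1.25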
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
#Display logs per epoch step
if epoch % display_step == 0:
print( "Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
"W=", sess.run(W), "b=", sess.run(b))
print( "Optimization Finished!")
print( "cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), \
"W=", sess.run(W), "b=", sess.run(b))
#Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
#plt.show()
# Save the figure to the specified directory instead of showing it
plt.savefig("/data/demo4.png")
| mit |
empeeu/numpy | numpy/lib/recfunctions.py | 148 | 35012 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structures
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
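# Illustrative sketch: combining two unnamed 1-D arrays yields auto-generated
# field names (the '<i8' width shown assumes a 64-bit build).
# >>> import numpy as np
# >>> zip_descr((np.array([1]), np.array([1.0])), flatten=True)
# [('f0', '<i8'), ('f1', '<f8')]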
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
Whether to collapse nested fields into a flat sequence of items.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something,
depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
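# Minimal usage sketch of append_fields (illustrative):
# >>> import numpy as np
# >>> from numpy.lib import recfunctions as rfn
# >>> a = np.array([(1,), (2,)], dtype=[('a', int)])
# >>> out = rfn.append_fields(a, 'b', [10., 20.], usemask=False)
# >>> out['b'].tolist()
# [10.0, 20.0]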
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays fields by fields
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
to the fields used to join the array. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords is
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
            names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
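# A minimal sketch of how join_by above might be used; the field names and
# values below are illustrative assumptions, not taken from the documentation.
def _join_by_example():
    import numpy as np
    r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('x', float)])
    r2 = np.array([(2, 200.), (3, 300.)], dtype=[('key', int), ('z', float)])
    # 'inner' keeps only the records whose 'key' occurs in both inputs; with
    # 'outer' the remaining records would be kept and masked where undefined.
    # Non-key fields sharing a name would get the r1postfix/r2postfix suffixes.
    return join_by('key', r1, r2, jointype='inner')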
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| bsd-3-clause |
RomainBrault/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 85 | 2698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
tosolveit/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
rahul-c1/scikit-learn | sklearn/tests/test_common.py | 2 | 16115 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.base import (ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin)
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.cross_validation import train_test_split
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
check_parameters_default_constructible,
check_regressors_classifiers_sparse_data,
check_transformer,
check_clustering,
check_regressors_int,
check_regressors_train,
check_regressors_pickle,
check_transformer_sparse_data,
check_transformer_pickle,
check_estimators_nan_inf,
check_classifiers_one_label,
check_classifiers_train,
check_classifiers_classes,
check_classifiers_input_shapes,
check_classifiers_pickle,
check_class_weight_classifiers,
check_class_weight_auto_classifiers,
check_class_weight_auto_linear_classifier,
check_estimators_overwrite_params,
check_cluster_overwrite_params,
check_sparsify_binary_classifier,
check_sparsify_multiclass_classifier,
check_classifier_data_not_an_array,
check_regressor_data_not_an_array,
check_transformer_data_not_an_array,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
CROSS_DECOMPOSITION)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_estimators_sparse_data():
# All estimators should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
estimators = all_estimators()
estimators = [(name, Estimator) for name, Estimator in estimators
if issubclass(Estimator, (ClassifierMixin, RegressorMixin))]
for name, Estimator in estimators:
yield check_regressors_classifiers_sparse_data, name, Estimator
def test_transformers():
# test if transformers do something sensible on training set
# also test all shapes / shape errors
transformers = all_estimators(type_filter='transformer')
for name, Transformer in transformers:
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
yield check_transformer_sparse_data, name, Transformer
yield check_transformer_pickle, name, Transformer
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array, name, Transformer
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
# basic tests
yield check_transformer, name, Transformer
def test_estimators_nan_inf():
# Test that all estimators check their input for NaN's and infs
estimators = all_estimators()
estimators = [(name, E) for name, E in estimators
if (issubclass(E, ClassifierMixin) or
issubclass(E, RegressorMixin) or
issubclass(E, TransformerMixin) or
issubclass(E, ClusterMixin))]
for name, Estimator in estimators:
if name not in CROSS_DECOMPOSITION + ['Imputer']:
yield check_estimators_nan_inf, name, Estimator
def test_clustering():
# test if clustering algorithms do something sensible
# also test all shapes / shape errors
clustering = all_estimators(type_filter='cluster')
for name, Alg in clustering:
        # test whether any clustering algorithm overwrites its init parameters during fit
yield check_cluster_overwrite_params, name, Alg
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering, name, Alg
def test_classifiers():
# test if classifiers can cope with non-consecutive classes
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
        # test classifiers can handle non-array data
yield check_classifier_data_not_an_array, name, Classifier
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label, name, Classifier
yield check_classifiers_classes, name, Classifier
yield check_classifiers_pickle, name, Classifier
# basic consistency testing
yield check_classifiers_train, name, Classifier
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
# test if classifiers can cope with y.shape = (n_samples, 1)
yield check_classifiers_input_shapes, name, Classifier
def test_regressors():
regressors = all_estimators(type_filter='regressor')
# TODO: test with intercept
# TODO: test with multiple responses
for name, Regressor in regressors:
# basic testing
yield check_regressors_train, name, Regressor
yield check_regressor_data_not_an_array, name, Regressor
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_regressors_pickle, name, Regressor
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int, name, Regressor
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_classifiers():
# test that class_weight works and that the semantics are consistent
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for name, Classifier in classifiers:
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
continue
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
continue
yield check_class_weight_classifiers, name, Classifier
def test_class_weight_auto_classifiers():
"""Test that class_weight="auto" improves f1-score"""
# This test is broken; its success depends on:
# * a rare fortuitous RNG seed for make_classification; and
# * the use of binary F1 over a seemingly arbitrary positive class for two
# datasets, and weighted average F1 for the third.
# Its expectations need to be clarified and reimplemented.
raise SkipTest('This test requires redefinition')
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
# create unbalanced dataset
X, y = make_classification(n_classes=n_classes, n_samples=200,
n_features=10, weights=weights,
random_state=0, n_informative=n_classes)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
for name, Classifier in classifiers:
if (name != "NuSVC"
# the sparse version has a parameter that doesn't do anything
and not name.startswith("RidgeClassifier")
# RidgeClassifier behaves unexpected
# FIXME!
and not name.endswith("NB")):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
yield (check_class_weight_auto_classifiers, name, Classifier,
X_train, y_train, X_test, y_test, weights)
def test_class_weight_auto_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_auto_linear_classifier, name, Classifier
def test_estimators_overwrite_params():
    # test whether any estimator overwrites its init parameters during fit
for est_type in ["classifier", "regressor", "transformer"]:
estimators = all_estimators(type_filter=est_type)
for name, Estimator in estimators:
if (name not in ['CCA', '_CCA', 'PLSCanonical', 'PLSRegression',
'PLSSVD', 'GaussianProcess']):
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params, name, Estimator
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_sparsify_estimators():
    # Test if predict with sparsified estimators works.
    # Tests regression, binary classification, and multi-class classification.
estimators = all_estimators()
# test regression and binary classification
for name, Estimator in estimators:
try:
Estimator.sparsify
yield check_sparsify_binary_classifier, name, Estimator
except:
pass
# test multiclass classification
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
try:
Classifier.sparsify
yield check_sparsify_multiclass_classifier, name, Classifier
except:
pass
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators that have a max_iter
    # attribute report an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif name in CROSS_DECOMPOSITION or (
name in ['LinearSVC', 'LogisticRegression']
):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
| bsd-3-clause |
JesseLivezey/sklearn-theano | sklearn_theano/sandbox/overfeat_wordnet_hierarchy.py | 7 | 2367 | from __future__ import print_function
import numpy as np
from nltk.corpus import wordnet
from sklearn_theano.feature_extraction.overfeat_class_labels import (
get_all_overfeat_labels)
import json
labels = get_all_overfeat_labels()
synsets = [wordnet.synset(
label.split(',')[0].replace(" ", "_") + ".n.1")
for label in labels]
wordnet_to_labels = dict(zip([synset.name() for synset in synsets], labels))
labels_to_wordnet = dict(zip(labels, [synset.name() for synset in synsets]))
hypernym_paths = [synset.hypernym_paths() for synset in synsets]
hierarchy = dict()
for synset, hpaths in zip(synsets, hypernym_paths):
print(synset)
hierarchy[synset.name()] = hierarchy.get(synset.name(),
dict(children=[], parents=[]))
for hpath in hpaths:
old_item = synset.name()
for item in hpath[::-1][1:]:
new_element = hierarchy[item.name()] = hierarchy.get(item.name(),
dict(children=[], parents=[]))
hierarchy[old_item]["parents"] = list(np.unique(
hierarchy[old_item]["parents"] + [item.name()]))
new_element["children"] = list(np.unique(
new_element["children"] + [old_item]))
old_item = item.name()
def get_all_leafs(synset_name):
hitem = hierarchy.get(synset_name, None)
if hitem is None:
raise Exception('synset is not in hierarchy')
if hitem['children']:
leafs = []
for csynset in hitem['children']:
leafs = leafs + get_all_leafs(csynset)
leafs = list(np.unique(leafs))
return leafs
else:
return [synset_name]
overfeat_leafs_for_wordnet_concept = dict()
for synset_name in hierarchy.keys():
overfeat_leafs_for_wordnet_concept[synset_name] = [
wordnet_to_labels[leaf]
for leaf in get_all_leafs(synset_name)]
wordnet_to_labels_file = "wordnet_to_labels.json"
labels_to_wordnet_file = "labels_to_wordnet.json"
overfeat_leafs_file = "overfeat_leafs.json"
hierarchy_file = "hierarchy.json"
json.dump(wordnet_to_labels, open(wordnet_to_labels_file, "w"))
json.dump(labels_to_wordnet, open(labels_to_wordnet_file, "w"))
json.dump(overfeat_leafs_for_wordnet_concept, open(overfeat_leafs_file, "w"))
json.dump(hierarchy, open(hierarchy_file, "w"))
| bsd-3-clause |
icdishb/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
theflofly/tensorflow | tensorflow/contrib/timeseries/examples/known_anomaly.py | 24 | 7880 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def state_space_estimator(exogenous_feature_columns):
"""Constructs a StructuralEnsembleRegressor."""
def _exogenous_update_condition(times, features):
del times # unused
# Make exogenous updates sparse by setting an update condition. This in
# effect allows missing exogenous features: if the condition evaluates to
# False, no update is performed. Otherwise we sometimes end up with "leaky"
# updates which add unnecessary uncertainty to the model even when there is
# no changepoint.
return tf.equal(tf.squeeze(features["is_changepoint"], axis=-1), "yes")
return (
tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=exogenous_feature_columns,
exogenous_update_condition=_exogenous_update_condition),
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
4, 64)
def autoregressive_estimator(exogenous_feature_columns):
input_window_size = 8
output_window_size = 2
return (
tf.contrib.timeseries.ARRegressor(
periodicities=12,
num_features=1,
input_window_size=input_window_size,
output_window_size=output_window_size,
exogenous_feature_columns=exogenous_feature_columns),
64, input_window_size + output_window_size)
def train_and_evaluate_exogenous(
estimator_fn, csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.feature_column.categorical_column_with_vocabulary_list(
key="is_changepoint", vocabulary_list=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.feature_column.indicator_column(
categorical_column=string_feature)
estimator, batch_size, window_size = estimator_fn(
exogenous_feature_columns=[one_hot_feature])
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=batch_size, window_size=window_size)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly (state space)",
*train_and_evaluate_exogenous(
estimator_fn=state_space_estimator))
make_plot("Ignoring a known anomaly (autoregressive)",
*train_and_evaluate_exogenous(
estimator_fn=autoregressive_estimator, train_steps=3000))
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
nmartensen/pandas | doc/sphinxext/numpydoc/plot_directive.py | 89 | 20530 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
(If None or empty, file names are relative to the directoly where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
import sphinx
from io import StringIO
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec(setup.config.plot_pre_code, ns)
exec(code, ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except exceptions.BaseException as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
| bsd-3-clause |
shashanksingh28/code-similarity | evaluation/evaluate.py | 1 | 2592 | #!/usr/bin/env python3
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import log2, log
from collections import Counter
from scipy.stats import ttest_rel
def counterJaccardSim(c1, c2):
    """Multiset Jaccard similarity of two Counters: sum(c1 & c2) / sum(c1 | c2)."""
cU = c1 | c2
cI = c1 & c2
sum_cU = sum(cU.values())
if sum_cU == 0:
return 0
return sum(cI.values()) / sum_cU
def getCounter(x):
return eval(x)
def transform(csv_file):
""" Apply evaluation functions and simple count enhancements and return """
df = pd.read_csv(csv_file, usecols=['Rank','Sample_Concepts','Codereco_Concepts','Baseline_Concepts'])
df.loc[:,'Sample_Concepts'] = df['Sample_Concepts'].apply(getCounter)
df.loc[:,'Sample_Concepts_Count'] = df['Sample_Concepts'].apply(len)
df.loc[:,'Codereco_Concepts'] = df['Codereco_Concepts'].apply(getCounter)
df.loc[:,'Codereco_Concepts_Count'] = df['Codereco_Concepts'].apply(len)
df.loc[:,'Baseline_Concepts'] = df['Baseline_Concepts'].apply(getCounter)
df.loc[:,'Baseline_Concepts_Count'] = df['Baseline_Concepts'].apply(len)
return df
def entropy(counter):
    """Shannon entropy (natural log) of the count distribution in a Counter."""
    ent = 0
    if len(counter) == 0:
        return ent
total = sum(counter.values())
for key in counter:
p = counter[key] / total
ent -= p * log(p)
return ent
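# A small illustrative sketch (hypothetical concept counts) of the two metrics
# defined above: multiset Jaccard similarity and natural-log entropy.
def _metrics_example():
    c1 = Counter({'loop': 2, 'list': 1})
    c2 = Counter({'loop': 1, 'dict': 1})
    # Intersection {'loop': 1} sums to 1 and union {'loop': 2, 'list': 1,
    # 'dict': 1} sums to 4, so the similarity is 1 / 4 = 0.25.
    sim = counterJaccardSim(c1, c2)
    # c1 has probabilities 2/3 and 1/3, so entropy = -(2/3)*ln(2/3) - (1/3)*ln(1/3).
    ent = entropy(c1)
    return sim, ent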
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Provide csv file containing data")
sys.exit(1)
df = transform(sys.argv[1])
_ = df[['Sample_Concepts_Count','Baseline_Concepts_Count','Codereco_Concepts_Count']].plot.box(title='Unique Concepts Distribution')
print(ttest_rel(df['Baseline_Concepts_Count'],df['Codereco_Concepts_Count']))
plt.show()
df['Codereco_Similarity'] = df.apply(lambda x: counterJaccardSim(x['Sample_Concepts'],x['Codereco_Concepts']), axis=1)
df['Baseline_Similarity'] = df.apply(lambda x: counterJaccardSim(x['Sample_Concepts'],x['Baseline_Concepts']), axis=1)
print(ttest_rel(df['Baseline_Similarity'],df['Codereco_Similarity']))
_ = df[['Baseline_Similarity','Codereco_Similarity']].plot.box(title='Sample Concepts Jaccard Similarity')
plt.show()
df['Baseline_Entropy'] = df['Baseline_Concepts'].apply(entropy)
df['Codereco_Entropy'] = df['Codereco_Concepts'].apply(entropy)
print(ttest_rel(df['Baseline_Entropy'],df['Codereco_Entropy']))
_ = df[['Baseline_Entropy','Codereco_Entropy']].plot.box(title='Concepts Entropy')
plt.show()
df.to_csv('evaluation_data.csv')
df.describe().to_csv('evaluation_summary.csv')
print(df.describe())
| mit |
daodaoliang/neural-network-animation | matplotlib/tests/test_subplots.py | 9 | 4999 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
import six
from six.moves import xrange
import numpy
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, cleanup
from nose.tools import assert_raises
def check_shared(results, f, axs):
"""
results is a 4 x 4 x 2 matrix of boolean values where
if [i, j, 0] == True, X axis for subplots i and j should be shared
if [i, j, 1] == False, Y axis for subplots i and j should not be shared
"""
shared_str = ['x', 'y']
shared = [axs[0]._shared_x_axes, axs[0]._shared_y_axes]
#shared = {
# 'x': a1._shared_x_axes,
# 'y': a1._shared_y_axes,
# }
tostr = lambda r: "not " if r else ""
for i1 in xrange(len(axs)):
for i2 in xrange(i1 + 1, len(axs)):
for i3 in xrange(len(shared)):
assert shared[i3].joined(axs[i1], axs[i2]) == \
results[i1, i2, i3], \
"axes %i and %i incorrectly %ssharing %s axis" % \
(i1, i2, tostr(results[i1, i2, i3]), shared_str[i3])
def check_visible(result, f, axs):
tostr = lambda v: "invisible" if v else "visible"
for (ax, vx, vy) in zip(axs, result['x'], result['y']):
for l in ax.get_xticklabels():
assert l.get_visible() == vx, \
"X axis was incorrectly %s" % (tostr(vx))
for l in ax.get_yticklabels():
assert l.get_visible() == vy, \
"Y axis was incorrectly %s" % (tostr(vy))
def test_shared():
rdim = (4, 4, 2)
share = {
'all': numpy.ones(rdim[:2], dtype=bool),
'none': numpy.zeros(rdim[:2], dtype=bool),
'row': numpy.array([
[False, True, False, False],
[True, False, False, False],
[False, False, False, True],
[False, False, True, False]]),
'col': numpy.array([
[False, False, True, False],
[False, False, False, True],
[True, False, False, False],
[False, True, False, False]]),
}
visible = {
'x': {
'all': [False, False, True, True],
'col': [False, False, True, True],
'row': [True] * 4,
'none': [True] * 4,
False: [True] * 4,
True: [False, False, True, True],
},
'y': {
'all': [True, False, True, False],
'col': [True] * 4,
'row': [True, False, True, False],
'none': [True] * 4,
False: [True] * 4,
True: [True, False, True, False],
},
}
share[False] = share['none']
share[True] = share['all']
# test default
f, ((a1, a2), (a3, a4)) = plt.subplots(2, 2)
axs = [a1, a2, a3, a4]
check_shared(numpy.dstack((share['none'], share['none'])), \
f, axs)
plt.close(f)
# test all option combinations
ops = [False, True, 'all', 'none', 'row', 'col']
for xo in ops:
for yo in ops:
f, ((a1, a2), (a3, a4)) = plt.subplots(2, 2, sharex=xo, sharey=yo)
axs = [a1, a2, a3, a4]
check_shared(numpy.dstack((share[xo], share[yo])), \
f, axs)
check_visible(dict(x=visible['x'][xo], y=visible['y'][yo]), \
f, axs)
plt.close(f)
def test_exceptions():
# TODO should this test more options?
assert_raises(ValueError, plt.subplots, 2, 2, sharex='blah')
assert_raises(ValueError, plt.subplots, 2, 2, sharey='blah')
    # We filter warnings in this test, which are genuine, since
    # the point of this test is to ensure that this raises.
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='.*sharex\ argument\ to\ subplots',
category=UserWarning)
assert_raises(ValueError, plt.subplots, 2, 2, -1)
# uncomment this for 1.5
# assert_raises(ValueError, plt.subplots, 2, 2, 0)
assert_raises(ValueError, plt.subplots, 2, 2, 5)
@image_comparison(baseline_images=['subplots_offset_text'], remove_text=False)
def test_subplots_offsettext():
x = numpy.arange(0, 1e10, 1e9)
y = numpy.arange(0, 100, 10)+1e4
fig, axes = plt.subplots(2, 2, sharex='col', sharey='all')
axes[0, 0].plot(x, x)
axes[1, 0].plot(x, x)
axes[0, 1].plot(y, x)
axes[1, 1].plot(y, x)
@cleanup
def test_subplots():
# things to test
# - are axes actually shared?
# - are tickmarks correctly hidden?
test_shared()
# - are exceptions thrown correctly
test_exceptions()
if __name__ == "__main__":
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
procoder317/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
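# For class c, the OVA decision boundary is the set of points (x0, x1) where
#   coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0,
# so the helper below solves this equation for x1 given x0 in order to draw
# each dashed hyperplane across the current x-limits.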
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
nesterione/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 105 | 22788 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine is kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
JosmanPS/scikit-learn | examples/classification/plot_lda_qda.py | 164 | 4806 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
synthicity/activitysim | example_multiple_zone/extensions/los.py | 2 | 6923 | # ActivitySim
# See full license in LICENSE.txt.
import logging
import numpy as np
import pandas as pd
from activitysim.core import inject
from activitysim.core import skim as askim
from activitysim.core.util import quick_loc_df
from activitysim.core.tracing import print_elapsed_time
logger = logging.getLogger('activitysim')
class NetworkLOS(object):
def __init__(self, taz, maz, tap, maz2maz, maz2tap,
taz_skim_dict, tap_skim_dict):
self.taz_df = taz
self.maz_df = maz
self.tap_df = tap
# print "maz_df unique maz", len(self.maz_df.index)
# maz2maz_df
self.maz2maz_df = maz2maz
# create single index for fast lookup
m = maz2maz.DMAZ.max() + 1
maz2maz['i'] = maz2maz.OMAZ * m + maz2maz.DMAZ
maz2maz.set_index('i', drop=True, inplace=True, verify_integrity=True)
self.maz2maz_cardinality = m
# maz2tap_df
self.maz2tap_df = maz2tap
# create single index for fast lookup
m = maz2tap.TAP.max() + 1
maz2tap['i'] = maz2tap.MAZ * m + maz2tap.TAP
maz2tap.set_index('i', drop=True, inplace=True, verify_integrity=True)
self.maz2tap_cardinality = m
self.taz_skim_dict = taz_skim_dict
self.taz_skim_stack = askim.SkimStack(taz_skim_dict)
self.tap_skim_dict = tap_skim_dict
self.tap_skim_stack = askim.SkimStack(tap_skim_dict)
def get_taz(self, taz_list, attribute):
return quick_loc_df(taz_list, self.taz_df, attribute)
def get_tap(self, tap_list, attribute):
return quick_loc_df(tap_list, self.tap_df, attribute)
def get_maz(self, maz_list, attribute):
return quick_loc_df(maz_list, self.maz_df, attribute)
def get_tazpairs(self, otaz, dtaz, key):
skim = self.taz_skim_dict.get(key)
s = skim.get(otaz, dtaz)
return s
def get_tazpairs3d(self, otaz, dtaz, dim3, key):
s = self.taz_skim_stack.lookup(otaz, dtaz, dim3, key)
return s
def get_tappairs(self, otap, dtap, key):
skim = self.tap_skim_dict.get(key)
s = skim.get(otap, dtap)
n = (skim.data < 0).sum()
p = (skim.data >= 0).sum()
nan = np.isnan(skim.data).sum()
print "get_tappairs %s %s neg %s po %s nan" % (key, n, p, nan)
return s
def get_tappairs3d(self, otap, dtap, dim3, key):
s = self.tap_skim_stack.lookup(otap, dtap, dim3, key)
return s
def get_mazpairs(self, omaz, dmaz, attribute):
# # this is slower
# s = pd.merge(pd.DataFrame({'OMAZ': omaz, 'DMAZ': dmaz}),
# self.maz2maz_df,
# how="left")[attribute]
# synthetic index method i : omaz_dmaz
i = np.asanyarray(omaz) * self.maz2maz_cardinality + np.asanyarray(dmaz)
s = quick_loc_df(i, self.maz2maz_df, attribute)
# FIXME - no point in returning series? unless maz and tap have same index?
return np.asanyarray(s)
def get_maztappairs(self, maz, tap, attribute):
# synthetic i method : maz_tap
i = np.asanyarray(maz) * self.maz2tap_cardinality + np.asanyarray(tap)
s = quick_loc_df(i, self.maz2tap_df, attribute)
        # FIXME - no point in returning series? unless maz and tap have same index?
return np.asanyarray(s)
def get_taps_mazs(self, maz, attribute=None, filter=None):
        # we return multiple tap rows for each maz, so we add an 'idx' column to tell the caller
        # which maz-tap rows belong to which row in the original maz list
# i.e. idx contains the index of the original maz series so we know which
# rows belong together
# if maz is a series, then idx has the original maz series index values
# otherwise it has the 0-based integer offset of the original maz
if filter:
maz2tap_df = self.maz2tap_df[pd.notnull(self.maz2tap_df[filter])]
else:
maz2tap_df = self.maz2tap_df
if attribute:
# FIXME - not sure anyone needs this feature
maz2tap_df = maz2tap_df[['MAZ', 'TAP', attribute]]
# filter out null attribute rows
maz2tap_df = maz2tap_df[pd.notnull(self.maz2tap_df[attribute])]
else:
maz2tap_df = maz2tap_df[['MAZ', 'TAP']]
if isinstance(maz, pd.Series):
# idx based on index of original maz series
maz_df = pd.DataFrame({'MAZ': maz, 'idx': maz.index})
else:
# 0-based index of original maz
maz_df = pd.DataFrame({'MAZ': maz, 'idx': range(len(maz))})
df = pd.merge(maz_df, maz2tap_df, how="inner", sort=False)
return df
def get_tappairs_mazpairs(network_los, omaz, dmaz, ofilter=None, dfilter=None):
# get nearby boarding TAPs to origin
omaz_btap_df = network_los.get_taps_mazs(omaz, ofilter)
# get nearby alighting TAPs to destination
dmaz_atap_df = network_los.get_taps_mazs(dmaz, dfilter)
    # expand to one row for every btap-atap pair
atap_btap_df = pd.merge(omaz_btap_df, dmaz_atap_df, on='idx', how="inner")
atap_btap_df.rename(
columns={'MAZ_x': 'omaz', 'TAP_x': 'btap', 'MAZ_y': 'dmaz', 'TAP_y': 'atap'},
inplace=True)
return atap_btap_df
def __str__(self):
return "\n".join((
"taz (%s)" % len(self.taz_df.index),
"maz (%s)" % len(self.maz_df.index),
"tap (%s)" % len(self.tap_df.index),
"maz2maz (%s)" % len(self.maz2maz_df.index),
"maz2tap (%s)" % len(self.maz2tap_df.index),
"taz_skim_dict (%s keys)" % self.taz_skim_dict.key_count(),
"tap_skim_dict (%s keys)" % self.tap_skim_dict.key_count(),
"taz_skim_stack (%s keys)" % self.taz_skim_stack.key_count(),
"tap_skim_stack (%s keys)" % self.tap_skim_stack.key_count(),
))
@inject.injectable(cache=True)
def network_los(store, taz_skim_dict, tap_skim_dict):
taz = store["TAZ"]
maz = store["MAZ"]
tap = store["TAP"]
maz2maz = store["MAZtoMAZ"]
maz2tap = store["MAZtoTAP"]
print "taz index %s columns %s" % (taz.index.name, taz.columns.values)
print "tap index %s columns %s" % (tap.index.name, tap.columns.values)
print "maz index %s columns %s" % (maz.index.name, maz.columns.values)
print "maz2maz index %s columns %s" % (maz2maz.index.name, maz2maz.columns.values)
print "maz2tap index %s columns %s" % (maz2tap.index.name, maz2tap.columns.values)
# print "tap index %s columns %s" % (tap.index.name, tap.columns.values)
# print "tap_skim_offsets index %s columns %s" % (tap_skim_offsets.index.name,
# tap_skim_offsets.columns.values)
nlos = NetworkLOS(taz, maz, tap, maz2maz, maz2tap, taz_skim_dict, tap_skim_dict)
return nlos
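# Illustrative sketch (not part of the original module) of the synthetic-index
# lookup used by get_mazpairs/get_maztappairs above: with
# m = maz2maz.DMAZ.max() + 1, the pair (OMAZ, DMAZ) maps to the single integer
# OMAZ * m + DMAZ, so a whole vector of pairs resolves with one indexed lookup
# instead of a merge, e.g.:
#
#   # assuming `nlos` is the NetworkLOS instance returned by the injectable above
#   walk_dist = nlos.get_mazpairs(np.array([3, 7, 7]), np.array([11, 2, 11]), 'DISTWALK')
#
# 'DISTWALK' is a hypothetical attribute name used only for illustration; the
# available columns depend on the MAZtoMAZ table loaded into the store.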
| agpl-3.0 |
xwolf12/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
A :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
fivejjs/pyhsmm | examples/hmm.py | 3 | 2159 | from __future__ import division
import numpy as np
np.seterr(divide='ignore') # these warnings are usually harmless for this code
from matplotlib import pyplot as plt
import matplotlib
import os
matplotlib.rcParams['font.size'] = 8
import pyhsmm
from pyhsmm.util.text import progprint_xrange
print \
'''
This demo shows how HDP-HMMs can fail when the underlying data has state
persistence without some kind of temporal regularization (in the form of a
sticky bias or duration modeling): without setting the number of states to be
the correct number a priori, lots of extra states can be instantiated.
BUT the effect is much more pronounced on real data (when the data doesn't exactly
fit the model). Maybe this demo should use multinomial emissions...
'''
###############
# load data #
###############
data = np.loadtxt(os.path.join(os.path.dirname(__file__),'example-data.txt'))[:2500]
#########################
# posterior inference #
#########################
# Set the weak limit truncation level
Nmax = 25
# and some hyperparameters
obs_dim = data.shape[1]
obs_hypparams = {'mu_0':np.zeros(obs_dim),
'sigma_0':np.eye(obs_dim),
'kappa_0':0.25,
'nu_0':obs_dim+2}
### HDP-HMM without the sticky bias
obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in xrange(Nmax)]
posteriormodel = pyhsmm.models.WeakLimitHDPHMM(alpha=6.,gamma=6.,init_state_concentration=1.,
obs_distns=obs_distns)
posteriormodel.add_data(data)
for idx in progprint_xrange(100):
posteriormodel.resample_model()
posteriormodel.plot()
plt.gcf().suptitle('HDP-HMM sampled model after 100 iterations')
### Sticky-HDP-HMM
obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in xrange(Nmax)]
posteriormodel = pyhsmm.models.WeakLimitStickyHDPHMM(
kappa=50.,alpha=6.,gamma=6.,init_state_concentration=1.,
obs_distns=obs_distns)
posteriormodel.add_data(data)
for idx in progprint_xrange(100):
posteriormodel.resample_model()
posteriormodel.plot()
plt.gcf().suptitle('Sticky HDP-HMM sampled model after 100 iterations')
plt.show()
| mit |
tonnpa/opleaders | analysis/graphprop.py | 1 | 2513 | from math import exp, log, pow
import matplotlib.pyplot as plt
import networkx as nx
class GraphProp:
def __init__(self, graph):
self.graph = graph
self.m = None
self.c = None
def avg_path_length(self):
if nx.number_connected_components(self.graph) == 1:
return nx.average_shortest_path_length(self.graph)
else:
print('[Error] graph is not connected')
def degree_distribution(self):
deg = [self.graph.degree(node) for node in self.graph.nodes()]
x = sorted(list(set(deg)))
y = [deg.count(i) for i in x]
return x, y
def max_node_degree(self):
return max(self.graph.degree(n) for n in self.graph.nodes())
def plot_degree_distribution(self, line=False, axis=None, dot_size=7):
if line and not (self.m and self.c):
self.power_law_coefficients()
x, y = self.degree_distribution()
# plt.title('Node degree distribution')
plt.xlabel('Node degree')
plt.ylabel('Count')
plt.plot(x, y, 'co', markersize=dot_size)
if axis:
plt.axis(axis)
else:
plt.axis([0, max(x)*1.2, 0, max(y)*1.2])
if line:
yy = [self.c * pow(x[i], self.m) for i in range(len(x))]
plt.plot(x, yy)
plt.show()
def plot_degree_distribution_loglog(self, line=False):
if line and not (self.m and self.c):
self.power_law_coefficients()
x, y = self.degree_distribution()
plt.title('Node degree distribution logarithmic scale')
plt.xlabel('Node degree')
plt.ylabel('Count')
plt.loglog(x, y, 'co')
if line:
yy = [self.c * pow(x[i], self.m) for i in range(len(x))]
plt.loglog(x, yy)
plt.show()
def power_law_coefficients(self):
x, y = self.degree_distribution()
n = len(x)
log_x = [log(i) for i in x]
log_y = [log(i) for i in y]
x_avg = sum(log_x) / float(n)
y_avg = sum(log_y) / float(n)
        sum_xy = sum(log_x[i] * log_y[i] for i in range(n))
        sum_xx = sum(v * v for v in log_x)
        m = (sum_xy - n * x_avg * y_avg) / (sum_xx - n * x_avg * x_avg)
        log_c = (y_avg * sum_xx - x_avg * sum_xy) / (sum_xx - n * x_avg * x_avg)
# log_c = y_avg - m * x_avg
c = exp(log_c)
self.m, self.c = m, c
return m, c
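# A minimal usage sketch (not part of the original class); the graph generator
# and sizes below are arbitrary assumptions chosen only for illustration.
if __name__ == '__main__':
    # scale-free graph, so the degree distribution should follow a power law
    g = nx.barabasi_albert_graph(500, 2)
    props = GraphProp(g)
    m, c = props.power_law_coefficients()
    print('fitted power law: count ~ {:.2f} * degree^{:.2f}'.format(c, m))
    props.plot_degree_distribution(line=True)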
| apache-2.0 |
RPGOne/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 58 | 19797 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
twareproj/tware | vizdom/server.py | 2 | 5144 | #!/usr/bin/python
import json
import pandas as pd
import traceback
import uuid
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from csv import DictReader
from cache import Cache
from executor import BasicExecutor
from util import Timer
icd9s = [('infectious',1,140),
('metabolic',240,280),
('blood',280,290),
('neurologic',320,390),
('heart_hypertensive',401,406),
('heart_ischemic',410,415),
('heart_failure',428,429),
('pulmonary',460,520),
('digestive',520,580),
('renal_insufficiency',580,630)]
schema = [['icd9',[(icd9[0],False) for icd9 in icd9s]],
['demo',[('sex',False),
('age',True),
('race',False),
('marital',False),
('religion',False)]],
['phys',[('height',True),
('weight',True),
('bmi',True),
('temperature',True),
('heart_rate',True),
('resp_rate',True),
('systolic_bp',True),
('diastolic_bp',True),
('spo2',True),
('sapsi',False),
('sofa',False),
('gcs',False),
('braden',False)]],
['blood',[['bmp',[('sodium',True),
('potassium',True),
('chloride',True),
('magnesium',True),
('calcium',True),
('anion_gap',True),
('bun',True),
('creatinine',True),
('glucose',True)]],
['abg',[('ph',True),
('be',True),
('total_co2',True),
('total_o2',True),
('pco2',True),
('po2',True)]],
['cbc',[('wbc',True),
('rbc',True),
('hgb',True),
('hct',True),
('mcv',True),
('mch',True),
('mchc',True),
('rdw',True),
('plates',True),
('neuts',True),
('lymphs',True),
('monos',True),
('basos',True),
('eos',True),
('pt',True),
('inr_pt',True),
('ptt',True)]],
['cardiac',[('ckmb',True),
('cpkmb',True),
('ldh',True),
('bnp',True),
('tropi',True),
('tropt',True)]],
['hepatic',[('total_bili',True),
('direct_bili',True),
('indirect_bili',True),
('albumin',True),
('tg',True)]]]]]
class Server(HTTPServer):
def __init__(self, addr, handler, file_dir):
HTTPServer.__init__(self, addr, handler)
self.catalog = {}
self.cache = Cache()
#file_uuid = str(uuid.uuid4())
file_uuid = '0'
self.cache[file_uuid] = pd.read_csv(file_dir)
self.catalog['mimic2'] = {'uuid': file_uuid,
'schema': schema}
class RequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
t = Timer()
t.start()
response = 200
result = {}
try:
content_length = int(self.headers.getheader('content-length'))
req = json.loads(self.rfile.read(content_length))
print req
req_type = req['type']
result = None
if req_type == 'catalog':
result = json.dumps(self.server.catalog)
elif req_type == 'execute':
task = req['args']['task']
                result = json.dumps(BasicExecutor(self.server.cache, task).execute())
elif req_type == 'lookup':
uuid = req['args']['uuid']
result = self.server.cache[uuid]
if type(result) is pd.DataFrame:
page_size = int(req['args']['page_size'])
page_num = int(req['args']['page_num'])
i = page_size * page_num
j = i + page_size
result = result[i:j]
result = result.to_json()
except:
print traceback.format_exc()
response = 500
result = '{}'
t.stop()
self.send_response(response)
self.send_header('Content-type','application/json')
self.end_headers()
self.wfile.write(result)
print 'Run Time:', t.time()
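# Illustrative sketch, not part of the original server module: how a client
# might talk to the JSON protocol implemented by do_POST above. The host, port
# and use of httplib are assumptions for illustration only; the request shape
# ({'type': ..., 'args': ...}) is taken from the handler code.
def _example_catalog_request(host='localhost', port=8000):
    import httplib  # Python 2 stdlib, matching the BaseHTTPServer style above
    conn = httplib.HTTPConnection(host, port)
    body = json.dumps({'type': 'catalog', 'args': {}})
    conn.request('POST', '/', body, {'Content-type': 'application/json'})
    resp = conn.getresponse()
    # do_POST answers with a JSON string and a 200 status on success
    return resp.status, json.loads(resp.read())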
| apache-2.0 |
CrazyGuo/bokeh | bokeh/charts/builder/step_builder.py | 43 | 5445 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Step class, which lets you build your Step charts just by
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
from six import string_types
from ..utils import cycle_colors
from .._builder import create_and_build, Builder
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Step(values, index=None, **kws):
""" Create a step chart using :class:`StepBuilder <bokeh.charts.builder.step_builder.StepBuilder>`
render the geometry from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series as an **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
area.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import Step, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
step = Step(xyvalues, title="Steps", legend="top_left", ylabel='Languages')
output_file('step.html')
show(step)
"""
return create_and_build(StepBuilder, values, index=index, **kws)
class StepBuilder(Builder):
"""This is the Step class and it is in charge of plotting
Step charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the
source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""It calculates the chart properties accordingly from Step.values.
Then build a dict containing references to all the points to be
used by the segment glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
self._groups = []
orig_xs = self._values_index
xs = np.empty(2*len(orig_xs)-1, dtype=np.int)
xs[::2] = orig_xs[:]
xs[1::2] = orig_xs[1:]
self._data['x'] = xs
for i, col in enumerate(self._values.keys()):
if isinstance(self.index, string_types) and col == self.index:
continue
# save every new group we find
self._groups.append(col)
orig_ys = np.array([self._values[col][x] for x in orig_xs])
ys = np.empty(2*len(orig_ys)-1)
ys[::2] = orig_ys[:]
ys[1::2] = orig_ys[:-1]
self._data['y_%s' % col] = ys
def _set_sources(self):
""" Push the Step data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
#y_sources = [sc.columns("y_%s" % col) for col in self._groups]
self.y_range = DataRange1d()
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the Step.
Takes reference points from the data loaded at the ColumnDataSource.
"""
colors = cycle_colors(self._groups, self.palette)
for i, name in enumerate(self._groups):
# draw the step horizontal segment
glyph = Line(x="x", y="y_%s" % name, line_color=colors[i], line_width=2)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
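# Minimal sketch, not part of the original module: the interleaving trick used
# by StepBuilder._process_data above to turn (x, y) points into the horizontal
# step segments drawn by the Line glyphs. The toy values are made up.
def _demo_step_interleave():
    orig_xs = np.array([0, 1, 2, 3])
    orig_ys = np.array([10., 20., 15., 30.])
    xs = np.empty(2 * len(orig_xs) - 1, dtype=orig_xs.dtype)
    xs[::2] = orig_xs
    xs[1::2] = orig_xs[1:]
    ys = np.empty(2 * len(orig_ys) - 1)
    ys[::2] = orig_ys
    ys[1::2] = orig_ys[:-1]
    # xs -> [0, 1, 1, 2, 2, 3, 3]; ys -> [10, 10, 20, 20, 15, 15, 30]
    return xs, ys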
| bsd-3-clause |
viisar/brew | brew/selection/dynamic/knora.py | 3 | 8361 | # -*- coding: utf-8 -*-
import numpy as np
from .base import DCS
from brew.base import Ensemble
# Do not use this class directly; use its subclasses instead (e.g. KNORA_ELIMINATE)
class KNORA(DCS):
def _get_best_classifiers(self, ensemble, neighbors_X, neighbors_y, x):
ensemble_out = ensemble.output(neighbors_X, mode='labels')
ensemble_mask = ensemble_out == neighbors_y[:, np.newaxis]
correct = np.sum(ensemble_mask, axis=0)
idx = np.argmax(correct) # best classifier idx
all_idx = correct == correct[idx]
pool = [ensemble.classifiers[i] for i in all_idx]
return pool
class KNORA_ELIMINATE(KNORA):
"""K-nearest-oracles Eliminate.
    The KNORA Eliminate reduces the neighborhood until it finds an
    ensemble of classifiers that correctly classify all neighbors.
Attributes
----------
`Xval` : array-like, shape = [indeterminated, n_features]
Validation set.
`yval` : array-like, shape = [indeterminated]
Labels of the validation set.
`knn` : sklearn KNeighborsClassifier,
Classifier used to find neighborhood.
`weighted` : bool, (makes no difference in knora_eliminate)
        Whether the classifiers' votes are weighted or not
Examples
--------
>>> from brew.selection.dynamic.knora import KNORA_ELIMINATE
>>> from brew.generation.bagging import Bagging
>>> from brew.base import EnsembleClassifier
>>>
>>> from sklearn.tree import DecisionTreeClassifier
>>> import numpy as np
>>>
>>> X = np.array([[-1, 0], [-0.8, 1], [-0.8, -1], [-0.5, 0],
[0.5, 0], [1, 0], [0.8, 1], [0.8, -1]])
>>> y = np.array([1, 1, 1, 2, 1, 2, 2, 2])
>>>
>>> dt = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
>>> bag = Bagging(base_classifier=dt, n_classifiers=10)
>>> bag.fit(X, y)
>>>
>>> ke = KNORA_ELIMINATE(X, y, K=5)
>>>
>>> clf = EnsembleClassifier(bag.ensemble, selector=ke)
>>> clf.predict([-1.1,-0.5])
[1]
See also
--------
brew.selection.dynamic.knora.KNORA_UNION: KNORA Union.
brew.selection.dynamic.lca.LCA: Local Class Accuracy.
brew.selection.dynamic.ola.OLA: Overall Local Accuracy.
References
----------
Ko, Albert HR, Robert Sabourin, and Alceu Souza Britto Jr.
"From dynamic classifier selection to dynamic ensemble selection."
Pattern Recognition 41.5 (2008): 1718-1731.
Britto, Alceu S., Robert Sabourin, and Luiz ES Oliveira.
"Dynamic selection of classifiers—A comprehensive review."
Pattern Recognition 47.11 (2014): 3665-3680.
Hung-Ren Ko, A., Robert Sabourin, and A. de Souza Britto.
"K-nearest oracle for dynamic ensemble selection."
Document Analysis and Recognition, 2007. ICDAR 2007.
Ninth International Conference on. Vol. 1. IEEE, 2007
"""
def __init__(self, Xval, yval, K=5, weighted=False, knn=None, v2007=False):
self.v2007 = v2007
super(KNORA_ELIMINATE, self).__init__(
Xval, yval, K=K, weighted=weighted, knn=knn)
def select(self, ensemble, x):
ensemble_mask = None
neighbors_X, neighbors_y = self.get_neighbors(x)
pool_output = ensemble.output(neighbors_X, mode='labels')
# gradually decrease neighborhood size if no
# classifier predicts ALL the neighbors correctly
for i in range(self.K, 0, -1):
pool_mask = _get_pool_mask(
pool_output[:i], neighbors_y[:i], np.all)
# if at least one classifier gets all neighbors right
if pool_mask is not None:
ensemble_mask = pool_mask
break
# if NO classifiers get the nearest neighbor correctly
if ensemble_mask is None:
if self.v2007:
# Increase neighborhood until one classifier
# gets at least ONE (i.e. ANY) neighbors correctly.
# Starts with 2 because mask_all with k=1 is
# the same as mask_any with k=1
for i in range(2, self.K + 1):
pool_mask = _get_pool_mask(
pool_output[:i], neighbors_y[:i], np.any)
if pool_mask is not None:
ensemble_mask = pool_mask
break
[selected_idx] = np.where(ensemble_mask)
if selected_idx.size > 0:
pool = Ensemble(
classifiers=[ensemble.classifiers[i] for i in selected_idx])
else: # use all classifiers
# pool = ensemble
classifiers = self._get_best_classifiers(
ensemble, neighbors_X, neighbors_y, x)
pool = Ensemble(classifiers=classifiers)
# KNORA-ELIMINATE-W that supposedly uses weights, does not make
# any sense, so even if self.weighted is True, always return
# None for the weights
return pool, None
class KNORA_UNION(KNORA):
"""K-nearest-oracles Union.
    The KNORA Union selects every classifier that correctly classifies
    at least one of the K nearest neighbors; each selected classifier
    gets one vote per neighbor it classifies correctly.
Attributes
----------
`Xval` : array-like, shape = [indeterminated, n_features]
Validation set.
`yval` : array-like, shape = [indeterminated]
Labels of the validation set.
`knn` : sklearn KNeighborsClassifier,
Classifier used to find neighborhood.
    `weighted` : bool,
        If True, votes are weighted by the inverse of the distance between
        the query point and each correctly classified neighbor
Examples
--------
>>> from brew.selection.dynamic.knora import KNORA_UNION
>>> from brew.generation.bagging import Bagging
>>> from brew.base import EnsembleClassifier
>>>
>>> from sklearn.tree import DecisionTreeClassifier
>>> import numpy as np
>>>
>>> X = np.array([[-1, 0], [-0.8, 1], [-0.8, -1], [-0.5, 0],
[0.5, 0], [1, 0], [0.8, 1], [0.8, -1]])
>>> y = np.array([1, 1, 1, 2, 1, 2, 2, 2])
>>>
>>> dt = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
>>> bag = Bagging(base_classifier=dt, n_classifiers=10)
>>> bag.fit(X, y)
>>>
>>> ku = KNORA_UNION(X, y, K=5)
>>>
>>> clf = EnsembleClassifier(bag.ensemble, selector=ku)
>>> clf.predict([-1.1,-0.5])
[1]
See also
--------
brew.selection.dynamic.knora.KNORA_ELIMINATE: Knora Eliminate.
brew.selection.dynamic.lca.LCA: Local Class Accuracy.
brew.selection.dynamic.ola.OLA: Overall Local Accuracy.
References
----------
Ko, Albert HR, Robert Sabourin, and Alceu Souza Britto Jr.
"From dynamic classifier selection to dynamic ensemble selection."
Pattern Recognition 41.5 (2008): 1718-1731.
Britto, Alceu S., Robert Sabourin, and Luiz ES Oliveira.
"Dynamic selection of classifiers—A comprehensive review."
Pattern Recognition 47.11 (2014): 3665-3680.
Hung-Ren Ko, A., Robert Sabourin, and A. de Souza Britto.
"K-nearest oracle for dynamic ensemble selection."
Document Analysis and Recognition, 2007. ICDAR 2007.
Ninth International Conference on. Vol. 1. IEEE, 2007.
"""
def select(self, ensemble, x):
neighbors_X, neighbors_y = self.get_neighbors(x)
pool_output = ensemble.output(neighbors_X, mode='labels')
output_mask = (pool_output == neighbors_y[:, np.newaxis])
[selected_idx] = np.where(np.any(output_mask, axis=0))
if selected_idx.size > 0:
if self.weighted:
weights = 1.0 / \
(np.sqrt(np.sum((x - neighbors_X)**2, axis=1)) + 10e-8)
weighted_votes = np.dot(weights, output_mask[:, selected_idx])
else:
weighted_votes = np.sum(output_mask[:, selected_idx], axis=0)
pool = Ensemble(
classifiers=[ensemble.classifiers[i] for i in selected_idx])
# if no classifiers are selected,
# use all classifiers with no weights
else:
pool = ensemble
weighted_votes = None
return pool, weighted_votes
def _get_pool_mask(pool_output, neighbors_target, func):
pool_mask = func(pool_output == neighbors_target[:, np.newaxis], axis=0)
if np.sum(pool_mask) > 0:
return pool_mask
return None
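# Hedged sketch, not part of the original module: how _get_pool_mask combines
# per-classifier predictions on the K neighbors. The toy predictions below are
# made up; rows are neighbors and columns are classifiers in the pool.
def _demo_pool_mask():
    pool_output = np.array([[1, 2, 1],
                            [1, 1, 2],
                            [2, 2, 2]])
    neighbors_target = np.array([1, 1, 2])
    # KNORA-E style: classifiers correct on ALL neighbors
    mask_all = _get_pool_mask(pool_output, neighbors_target, np.all)
    # mask_all -> [True, False, False]; only classifier 0 is right everywhere
    # KNORA-U style: classifiers correct on at least ONE neighbor
    mask_any = _get_pool_mask(pool_output, neighbors_target, np.any)
    # mask_any -> [True, True, True]; every classifier is right at least once
    return mask_all, mask_any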
| mit |
nesterione/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 83 | 34544 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
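# Illustrative sketch, not part of the original module: l1_cross_distances on a
# tiny 3-sample input, so there are 3 * 2 / 2 = 3 pairs. Values are made up.
def _demo_l1_cross_distances():
    X = np.array([[0., 0.], [1., 2.], [3., 1.]])
    D, ij = l1_cross_distances(X)
    # D  -> [[1., 2.], [3., 1.], [2., 1.]] for the pairs (0,1), (0,2), (1,2)
    # ij -> [[0, 1], [0, 2], [1, 2]]
    return D, ij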
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
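# Hedged usage sketch, not part of the original module: fitting the model above
# and asking for the mean squared error alongside the prediction. It mirrors the
# class docstring example; the query grid is an arbitrary choice.
def _demo_gaussian_process_mse():
    X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y = (X * np.sin(X)).ravel()
    gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.)
    gp.fit(X, y)
    x_new = np.atleast_2d(np.linspace(1., 8., 20)).T
    y_pred, mse = gp.predict(x_new, eval_MSE=True)
    return y_pred, mse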
| bsd-3-clause |
zhfhe/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
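# Illustrative sketch, not part of the original file: the layout transform used
# by the get_plottable_data methods above -- turning a (3*S*S, numCases) data
# matrix into a (numCases, S, S, 3) image stack. Values are random and only the
# shapes matter here.
def _demo_plottable_layout(num_cases=2, size=4):
    data = nr.rand(3 * size * size, num_cases).astype(n.single)
    imgs = data.T.reshape(num_cases, 3, size, size).swapaxes(1, 3).swapaxes(1, 2)
    return imgs.shape  # -> (num_cases, size, size, 3)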
| apache-2.0 |
brenthuisman/phd_tools | analysis.lyso.falloff.py | 1 | 3720 | #!/usr/bin/env python
import numpy as np,plot,auger
#OPT: quickly get sorted rundirs
# e.g. autogen | sort -k1.13 -r
#OPT: fix seed
#np.random.seed(65983247)
# We don't know what the added or reduced noise level is when changing energy windows, so we can't compare performance for ipnl3 and iba1.
typs=['ipnl-auger-tof-1.root','iba-auger-tof-1.root']
def megaplot(ctsets,studyname,emisfops=None,labels=["$10^9$","$10^8$","$10^7$","$10^6$"],axlabel='Primaries [nr]'):
# if emisfops is not None:
# for emisfop in emisfops:
# emisfop[0]+=15.863
# emisfop[1]+=15.863
# print 'FOP shift all overlaid'
if len(ctsets) == 4:
f, ((ax1,ax2),(ax3,ax4)) = plot.subplots(nrows=2, ncols=2, sharex=False, sharey=False)
auger.plot_all_ranges(ax1,ctsets[0])
auger.plot_all_ranges(ax2,ctsets[1])
auger.plot_all_ranges(ax3,ctsets[2])
auger.plot_all_ranges(ax4,ctsets[3])
if not 'Primaries' in axlabel:
ax1.set_title(labels[0])
ax2.set_title(labels[1])
ax3.set_title(labels[2])
ax4.set_title(labels[3])
f.subplots_adjust(hspace=.5)
ax1.set_xlabel('')
ax2.set_xlabel('')
ax2.set_ylabel('')
ax4.set_ylabel('')
f.savefig(studyname+'-'+typ+'-FOP.pdf', bbox_inches='tight')
plot.close('all')
#############################################################################################
print 'FOP shift distributions'
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = plt.axes(projection='3d')
ax1.view_init(30, -50)
for i,ctset in enumerate(ctsets):
auger.plotfodiffdist(ax1,ctset,i,emisfops,labels,axlabel)
if not emisfops == None:
fopshifts=[]
for fopset in emisfops:
fopshifts.append( fopset[-1]-fopset[0] )
ax1.set_xlim3d(np.mean(fopshifts)-20,np.mean(fopshifts)+20)
if emisfops is not None and len(emisfops) == 1:
ax1.set_title(studyname+', $Shift_{em}$ = '+str(emisfops[0][-1]-emisfops[0][0]), y=1.08)
#plt.tight_layout(rect = [-0.1, 0.0, 1.0, 1.1])#L,B,R,T
fig.savefig(studyname+'-'+typ+'-FOP-shift.pdf')#, bbox_inches='tight')
plt.close('all')
#############################################################################################
print 'FOP distributions'
fig = plt.figure()
ax1 = plt.axes(projection='3d')
ax1.view_init(30, -50)
for i,ctset in enumerate(ctsets):
auger.plotfodist(ax1,ctset,i,emisfops,labels,axlabel)
if emisfops is not None and len(emisfops) == 1:
ax1.set_title(studyname+', $CT_{FOP_{em}}$ = '+str(emisfops[0][0])[:5]+', $RPCT_{FOP_{em}}$ = '+str(emisfops[0][1])[:5], y=1.08)
#plt.legend()#shadow = True,frameon = True,fancybox = True,ncol = 1,fontsize = 'x-small',loc = 'lower right')
#plt.tight_layout(rect = [-0.1, 0.0, 1.0, 1.1])#L,B,R,T
plt.savefig(studyname+'-'+typ+'-FOP-dist.pdf')#, bbox_inches='tight')
plt.close('all')
#############################################################################################
# TODO add pgemissions plots.
for typ in typs:
ctsetsets = []
ctsetsets.append( auger.getctset(1e9,'1e9','1e9',typ) )
ctsetsets.append( auger.getctset(1e8,'1e8','1e8',typ) )
ctsetsets.append( auger.getctset(1e7,'1e7','1e7',typ) )
ctsetsets.append( auger.getctset(1e6,'1e6','1e6',typ) )
#ctsetsets.append( auger.getctset(1e9,'run.3poV','run.wucX',typ) )
#ctsetsets.append( auger.getctset(1e8,'run.1XRe','run.lTdI',typ) )
#ctsetsets.append( auger.getctset(1e7,'run.oPE7','run.RWkp',typ) )
#ctsetsets.append( auger.getctset(1e6,'run.7bG6','run.pijb',typ) )
megaplot(ctsetsets,'waterbox_redo')
print 'Mean detection yield in',typ,'study over',sum([ctset['totnprim'] for ctset in ctsetsets]),'primaries in',sum([ctset['nreal'] for ctset in ctsetsets]),'realisations:',sum([ctset['detyieldmu'] for ctset in ctsetsets])
| lgpl-3.0 |
toastedcornflakes/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
geledek/mrec | mrec/mf/evaluate.py | 3 | 2362 | def retrain_recommender(model,dataset):
model.fit(dataset.X)
if __name__ == '__main__':
try:
from sklearn.grid_search import ParameterGrid
except ImportError:
from sklearn.grid_search import IterGrid as ParameterGrid
from optparse import OptionParser
from warp import WARPMFRecommender
from mrec.evaluation.metrics import *
parser = OptionParser()
parser.add_option('-m','--main_split_dir',dest='main_split_dir',help='directory containing 50/50 splits for main evaluation')
parser.add_option('-l','--loo_split_dir',dest='loo_split_dir',help='directory containing LOO splits for hit rate evaluation')
parser.add_option('-n','--num_splits',dest='num_splits',type='int',default=5,help='number of splits in each directory (default: %default)')
(opts,args) = parser.parse_args()
if not (opts.main_split_dir or opts.loo_split_dir) or not opts.num_splits:
parser.print_help()
raise SystemExit
print 'doing a grid search for regularization parameters...'
params = {'d':[100],'gamma':[0.01],'C':[100],'max_iter':[100000],'validation_iters':[500]}
models = [WARPMFRecommender(**a) for a in ParameterGrid(params)]
for train in glob:
# get test
# load em both up
# put them into something that returns train,test.keys(),test in a generator()
# test is a dict id->[id,id,...]
if opts.main_split_dir:
generate_main_metrics = generate_metrics(get_known_items_from_dict,compute_main_metrics)
main_metrics = run_evaluation(models,
retrain_recommender,
load_splits(opts.main_split_dir,opts.num_splits),
opts.num_splits,
generate_main_metrics)
print_report(models,main_metrics)
if opts.loo_split_dir:
generate_hit_rate = generate_metrics(get_known_items_from_dict,compute_hit_rate)
hit_rate_metrics = run_evaluation(models,
retrain_recommender,
load_splits(opts.loo_split_dir,opts.num_splits),
opts.num_splits,
generate_hit_rate)
print_report(models,hit_rate_metrics)
| bsd-3-clause |
rvraghav93/scikit-learn | examples/tree/plot_unveil_tree_structure.py | 47 | 4852 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method allows to retrieve the node indicator functions. A non zero element of
# indicator matrix at the position (i, j) indicates that the sample i goes
# through the node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's make it for the sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    if leave_id[sample_id] == node_id:
continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X_test[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
micahhausler/pandashells | pandashells/test/p_hist_tests.py | 7 | 2350 | #! /usr/bin/env python
from mock import patch, MagicMock
from unittest import TestCase
import pandas as pd
from pandashells.bin.p_hist import main, get_input_args, validate_args
class GetInputArgsTests(TestCase):
@patch('pandashells.bin.p_hist.sys.argv', 'p.hist -c x -n 30'.split())
def test_right_number_of_args(self):
args = get_input_args()
self.assertEqual(len(args.__dict__), 26)
class ValidateArgs(TestCase):
def test_okay(self):
# passing test means nothing raised
args = MagicMock(quiet=False)
cols = ['a']
df = MagicMock(columns=['a'])
validate_args(args, cols, df)
@patch('pandashells.bin.p_hist.sys.stderr')
def test_bad_cols(self, stderr_mock):
# passing test means nothing raised
args = MagicMock(quiet=False)
cols = ['b']
df = MagicMock(columns=['a'])
with self.assertRaises(SystemExit):
validate_args(args, cols, df)
@patch('pandashells.bin.p_hist.sys.stderr')
def test_bad_quiet(self, stderr_mock):
# passing test means nothing raised
args = MagicMock(quiet=True)
cols = ['a', 'b']
df = MagicMock(columns=['a', 'b'])
with self.assertRaises(SystemExit):
validate_args(args, cols, df)
class MainTests(TestCase):
@patch(
'pandashells.bin.p_hist.sys.argv',
'p.hist -c x -q -n 10'.split())
@patch('pandashells.bin.p_hist.io_lib.df_to_output')
@patch('pandashells.bin.p_hist.io_lib.df_from_input')
def test_cli_quiet(self, df_from_input_mock, df_to_output_mock):
df_in = pd.DataFrame({
'x': range(1, 101)
})
df_from_input_mock.return_value = df_in
main()
df_out = df_to_output_mock.call_args_list[0][0][1]
self.assertEqual(set(df_out.columns), {'bins', 'counts'})
self.assertEqual(set(df_out.counts), {10})
@patch(
'pandashells.bin.p_hist.sys.argv',
'p.hist -c x -n 10'.split())
@patch('pandashells.bin.p_hist.plot_lib.show')
@patch('pandashells.bin.p_hist.io_lib.df_from_input')
def test_cli(self, df_from_input_mock, show_mock):
df_in = pd.DataFrame({
'x': range(1, 101)
})
df_from_input_mock.return_value = df_in
main()
self.assertTrue(show_mock.called)
| bsd-2-clause |
Adai0808/BuildingMachineLearningSystemsWithPython | ch11/demo_mds.py | 25 | 3724 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import numpy as np
from matplotlib import pylab
from mpl_toolkits.mplot3d import Axes3D
from sklearn import linear_model, manifold, decomposition, datasets
logistic = linear_model.LogisticRegression()
from utils import CHART_DIR
np.random.seed(3)
# all examples will have three classes in this file
colors = ['r', 'g', 'b']
markers = ['o', 6, '*']
def plot_demo_1():
X = np.c_[np.ones(5), 2 * np.ones(5), 10 * np.ones(5)].T
y = np.array([0, 1, 2])
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 2 dimensions")
filename = "mds_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_iris_mds():
iris = datasets.load_iris()
X = iris.data
y = iris.target
# MDS
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 2 dimensions")
filename = "mds_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
# PCA
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
pca = decomposition.PCA(n_components=3)
Xtrans = pca.fit(X).transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 3 dimensions")
ax.view_init(50, -35)
pca = decomposition.PCA(n_components=2)
Xtrans = pca.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 2 dimensions")
filename = "pca_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_demo_1()
plot_iris_mds()
| mit |
LCAV/pyroomacoustics | pyroomacoustics/room.py | 1 | 91375 | # Main Room class using to encapsulate the room acoustics simulator
# Copyright (C) 2019 Robin Scheibler, Ivan Dokmanic, Sidney Barthe, Cyril Cadoux
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
r"""
Room
====
The three main classes are :py:obj:`pyroomacoustics.room.Room`,
:py:obj:`pyroomacoustics.soundsource.SoundSource`, and
:py:obj:`pyroomacoustics.beamforming.MicrophoneArray`. On a high level, a
simulation scenario is created by first defining a room to which a few sound
sources and a microphone array are attached. The actual audio is attached to
the source as raw audio samples.
Then, a simulation method is used to create artificial room impulse responses
(RIR) between the sources and microphones. The current default method is the
image source which considers the walls as perfect reflectors. An experimental
hybrid simulator based on image source method (ISM) [1]_ and ray tracing (RT) [2]_, [3]_, is also available. Ray tracing
better captures late reflections and can also model effects such as
scattering.
The microphone signals are then created by convolving audio samples associated
to sources with the appropriate RIR. Since the simulation is done on
discrete-time signals, a sampling frequency is specified for the room and the
sources it contains. Microphones can optionally operate at a different sampling
frequency; a rate conversion is done in this case.
Simulating a Shoebox Room with the Image Source Model
-----------------------------------------------------
We will first walk through the steps to simulate a shoebox-shaped room in 3D.
We use the ISM is to find all image sources up to a maximum specified order and
room impulse responses (RIR) are generated from their positions.
The code for the full example can be found in `examples/room_from_rt60.py`.
Create the room
~~~~~~~~~~~~~~~
So-called shoebox rooms are parallelepipedic rooms with 4 or 6 walls (in 2D and
3D respectively), all at right angles. They are defined by a single vector that
contains the lengths of the walls. They have the advantage of being simple to
define and very efficient to simulate. In the following example, we define a
``9m x 7.5m x 3.5m`` room. In addition, we use `Sabine's formula <https://en.wikipedia.org/wiki/Reverberation>`_
to find the wall energy absorption and maximum order of the ISM required
to achieve a desired reverberation time (*RT60*, i.e. the time it takes for
the RIR to decays by 60 dB).
.. code-block:: python
import pyroomacoustics as pra
# The desired reverberation time and dimensions of the room
rt60 = 0.5 # seconds
room_dim = [9, 7.5, 3.5] # meters
# We invert Sabine's formula to obtain the parameters for the ISM simulator
e_absorption, max_order = pra.inverse_sabine(rt60, room_dim)
# Create the room
room = pra.ShoeBox(
room_dim, fs=16000, materials=pra.Material(e_absorption), max_order=max_order
)
The second argument is the sampling frequency at which the RIR will be
generated. Note that the default value of ``fs`` is 8 kHz.
The third argument is the material of the wall, that itself takes the absorption as a parameter.
The fourth and last argument is the maximum number of reflections allowed in the ISM.
.. note::
Note that Sabine's formula is only an approximation and that the actually
simulated RT60 may vary by quite a bit.
.. warning::
Until recently, rooms would take an ``absorption`` parameter that was
actually **not** the energy absorption we use now. The ``absorption``
parameter is now deprecated and will be removed in the future.
Add sources and microphones
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sources are fairly straightforward to create. They take their location as a single
mandatory argument, and a signal and start time as optional arguments. Here we
create a source located at ``[2.5, 3.73, 1.76]`` within the room, that will utter
the content of the wav file ``speech.wav`` starting at ``1.3 s`` into the
simulation. The ``signal`` keyword argument to the
:py:func:`~pyroomacoustics.room.Room.add_source` method should be a
one-dimensional ``numpy.ndarray`` containing the desired sound signal.
.. code-block:: python
# import a mono wavfile as the source signal
# the sampling frequency should match that of the room
from scipy.io import wavfile
_, audio = wavfile.read('speech.wav')
# place the source in the room
room.add_source([2.5, 3.73, 1.76], signal=audio, delay=1.3)
The locations of the microphones in the array should be provided in a numpy
``nd-array`` of size ``(ndim, nmics)``, that is each column contains the
coordinates of one microphone. Note that the array can use a sampling frequency
different from that of the room, in which case resampling will occur. Here, we create an array
with two microphones placed at ``[6.3, 4.87, 1.2]`` and ``[6.3, 4.93, 1.2]``.
.. code-block:: python
# define the locations of the microphones
import numpy as np
mic_locs = np.c_[
[6.3, 4.87, 1.2], # mic 1
[6.3, 4.93, 1.2], # mic 2
]
# finally place the array in the room
room.add_microphone_array(mic_locs)
A number of routines exist to create regular array geometries in 2D.
- :py:func:`~pyroomacoustics.beamforming.linear_2D_array`
- :py:func:`~pyroomacoustics.beamforming.circular_2D_array`
- :py:func:`~pyroomacoustics.beamforming.square_2D_array`
- :py:func:`~pyroomacoustics.beamforming.poisson_2D_array`
- :py:func:`~pyroomacoustics.beamforming.spiral_2D_array`
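
For instance, ``circular_2D_array`` returns the microphone locations as a
``(2, M)`` array that can be lifted to 3D and passed to
:py:func:`~pyroomacoustics.room.Room.add_microphone_array`. The snippet below
is a minimal sketch that assumes the usual signature
``circular_2D_array(center, M, phi0, radius)``.

.. code-block:: python

    import numpy as np
    import pyroomacoustics as pra

    # 6 microphones on a circle of 10 cm radius centered at (x, y) = (6.3, 4.9)
    R = pra.circular_2D_array(center=[6.3, 4.9], M=6, phi0=0.0, radius=0.1)

    # lift the 2D geometry to 3D by adding a constant height of 1.2 m
    R = np.concatenate((R, np.full((1, 6), 1.2)), axis=0)

    room.add_microphone_array(R)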
Create the Room Impulse Response
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
At this point, the RIRs are simply created by invoking the ISM via
:py:func:`~pyroomacoustics.room.Room.image_source_model`. This function will
generate all the image sources up to the required order and use them to
generate the RIRs, which will be stored in the ``rir`` attribute of ``room``.
The attribute ``rir`` is a list of lists so that the outer list is on microphones
and the inner list over sources.
.. code-block:: python
room.compute_rir()
# plot the RIR between mic 1 and source 0
import matplotlib.pyplot as plt
plt.plot(room.rir[1][0])
plt.show()
.. warning::
    The simulator uses a fractional delay filter that introduces a global delay
    in the RIR. The delay can be obtained as follows.
.. code-block:: python
global_delay = pra.constants.get("frac_delay_length") // 2
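
    If needed, the RIRs can be re-aligned by discarding these first samples,
    for example (a minimal sketch):

    .. code-block:: python

        rir_aligned = room.rir[0][0][global_delay:]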
Simulate sound propagation
~~~~~~~~~~~~~~~~~~~~~~~~~~
By calling :py:func:`~pyroomacoustics.room.Room.simulate`, a convolution of the
signal of each source (if not ``None``) will be performed with the
corresponding room impulse response. The output from the convolutions will be summed up
at the microphones. The result is stored in the ``signals`` attribute of ``room.mic_array``
with each row corresponding to one microphone.
.. code-block:: python
room.simulate()
# plot signal at microphone 1
plt.plot(room.mic_array.signals[1,:])
Full Example
~~~~~~~~~~~~
This example is partly extracted from `./examples/room_from_rt60.py`.
.. code-block:: python
import numpy as np
import matplotlib.pyplot as plt
import pyroomacoustics as pra
from scipy.io import wavfile
# The desired reverberation time and dimensions of the room
rt60_tgt = 0.3 # seconds
room_dim = [10, 7.5, 3.5] # meters
# import a mono wavfile as the source signal
# the sampling frequency should match that of the room
fs, audio = wavfile.read("examples/samples/guitar_16k.wav")
# We invert Sabine's formula to obtain the parameters for the ISM simulator
e_absorption, max_order = pra.inverse_sabine(rt60_tgt, room_dim)
# Create the room
room = pra.ShoeBox(
room_dim, fs=fs, materials=pra.Material(e_absorption), max_order=max_order
)
# place the source in the room
room.add_source([2.5, 3.73, 1.76], signal=audio, delay=0.5)
# define the locations of the microphones
mic_locs = np.c_[
[6.3, 4.87, 1.2], [6.3, 4.93, 1.2], # mic 1 # mic 2
]
# finally place the array in the room
room.add_microphone_array(mic_locs)
# Run the simulation (this will also build the RIR automatically)
room.simulate()
room.mic_array.to_wav(
f"examples/samples/guitar_16k_reverb_{args.method}.wav",
norm=True,
bitdepth=np.int16,
)
# measure the reverberation time
rt60 = room.measure_rt60()
print("The desired RT60 was {}".format(rt60_tgt))
print("The measured RT60 is {}".format(rt60[1, 0]))
# Create a plot
plt.figure()
# plot one of the RIR. both can also be plotted using room.plot_rir()
rir_1_0 = room.rir[1][0]
plt.subplot(2, 1, 1)
plt.plot(np.arange(len(rir_1_0)) / room.fs, rir_1_0)
plt.title("The RIR from source 0 to mic 1")
plt.xlabel("Time [s]")
# plot signal at microphone 1
plt.subplot(2, 1, 2)
plt.plot(room.mic_array.signals[1, :])
plt.title("Microphone 1 signal")
plt.xlabel("Time [s]")
plt.tight_layout()
plt.show()
Hybrid ISM/Ray Tracing Simulator
--------------------------------
.. warning::
The hybrid simulator has not been thoroughly tested yet and should be used with
care. The exact implementation and default settings may also change in the future.
Currently, the default behavior of :py:obj:`~pyroomacoustics.room.Room`
and :py:obj:`~pyroomacoustics.room.ShoeBox` has been kept as in previous
versions of the package. Bugs and user experience can be reported on
`github <https://github.com/LCAV/pyroomacoustics>`_.
The hybrid ISM/RT simulator uses ISM to simulate the early reflections in the RIR
and RT for the diffuse tail. Our implementation is based on [2]_ and [3]_.
The simulator has the following features.
- Scattering: Wall scattering can be defined by assigning a scattering
coefficient to the walls together with the energy absorption.
- Multi-band: The simulation can be carried out with different parameters for
different `octave bands <https://en.wikipedia.org/wiki/Octave_band>`_. The
octave bands go from 125 Hz to half the sampling frequency.
- Air absorption: The frequency dependent absorption of the air can be turned
  on by providing the keyword argument ``air_absorption=True`` to the room
  constructor.
Here is a simple example using the hybrid simulator.
We suggest to use ``max_order=3`` with the hybrid simulator.
.. code-block:: python
# Create the room
room = pra.ShoeBox(
room_dim,
fs=16000,
materials=pra.Material(e_absorption),
max_order=3,
ray_tracing=True,
air_absorption=True,
)
# Activate the ray tracing
room.set_ray_tracing()
A few example programs are provided in ``./examples``.
- ``./examples/ray_tracing.py`` demonstrates use of ray tracing for rooms of different sizes
and with different amounts of reverberation
- ``./examples/room_L_shape_3d_rt.py`` shows how to simulate a polyhedral room
- ``./examples/room_from_stl.py`` demonstrates how to import a model from an STL file
Wall Materials
--------------
The wall materials are handled by the
:py:obj:`~pyroomacoustics.parameters.Material` objects. A material is defined
by at least one *absorption* coefficient that represents the ratio of sound
energy absorbed by a wall upon reflection.
A material may have multiple absorption coefficients corresponding to different
abosrptions at different octave bands.
When only one coefficient is provided, the absorption is assumed to be uniform at
all frequencies.
In addition, materials may have one or more scattering coefficients
corresponding to the ratio of energy scattered upon reflection.
The materials can be defined by providing the coefficients directly, or they can
be defined by choosing a material from the :doc:`materials database<pyroomacoustics.materials.database>` [2]_.
.. code-block:: python
import pyroomacoustics as pra
m = pra.Material(energy_absorption="hard_surface")
room = pra.ShoeBox([9, 7.5, 3.5], fs=16000, materials=m, max_order=17)
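
The coefficients can also be given directly as numbers; a single value is then
applied to all frequency bands. This is a minimal sketch:

.. code-block:: python

    # a wall that absorbs 25% of the sound energy and scatters 5% of it
    m = pra.Material(energy_absorption=0.25, scattering=0.05)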
We can use different materials for different walls. In this case, the materials should be
provided in a dictionary. For a shoebox room, this can be done as follows.
.. code-block:: python
import pyroomacoustics as pra
m = pra.make_materials(
ceiling="hard_surface",
floor="6mm_carpet",
east="brickwork",
west="brickwork",
north="brickwork",
south="brickwork",
)
room = pra.ShoeBox(
[9, 7.5, 3.5], fs=16000, materials=m, max_order=17, air_absorption=True, ray_tracing=True
)
.. note::
For shoebox rooms, the walls are labelled as follows:
- ``west``/``east`` for the walls in the y-z plane with a small/large x coordinates, respectively
- ``south``/``north`` for the walls in the x-z plane with a small/large y coordinates, respectively
    - ``floor``/``ceiling`` for the walls in the x-y plane with small/large z coordinates, respectively
Controlling the signal-to-noise ratio
-------------------------------------
It is in general necessary to scale the signals from different sources to
obtain a specific signal-to-noise or signal-to-interference ratio (SNR and SIR,
respectively). This can be done by passing some options to the :py:func:`simulate()`
function. Because the relative amplitude of signals will change at different microphones
due to propagation, it is necessary to choose a reference microphone. By default, this
will be the first microphone in the array (index 0). The simplest choice is to choose
the variance of the noise \\(\\sigma_n^2\\) to achieve a desired SNR with respect
to the cumulative signal from all sources. Assuming that the signals from all sources
are scaled to have the same amplitude (e.g., unit amplitude) at the reference microphone,
the SNR is defined as
.. math::
\mathsf{SNR} = 10 \log_{10} \frac{K}{\sigma_n^2}
where \\(K\\) is the number of sources. For example, an SNR of 10 decibels (dB)
can be obtained using the following code
.. code-block:: python
room.simulate(reference_mic=0, snr=10)
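
For instance, with \\(K = 2\\) sources and a target of 10 dB, this corresponds
to a noise variance of \\(\\sigma_n^2 = K / 10^{\\mathsf{SNR} / 10} = 2 / 10 = 0.2\\)
at the reference microphone.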
Sometimes, more challenging normalizations are necessary. In that case,
a custom callback function can be provided to simulate. For example,
we can imagine a scenario where we have ``n_src`` out of which ``n_tgt``
are the targets, the rest being interferers. We will assume all
targets have unit variance, and all interferers have equal
variance \\(\\sigma_i^2\\) (at the reference microphone). In
addition, there is uncorrelated noise \\(\\sigma_n^2\\) at
every microphones. We will define SNR and SIR with respect
to a single target source:
.. math::
\mathsf{SNR} & = 10 \log_{10} \frac{1}{\sigma_n^2}
\mathsf{SIR} & = 10 \log_{10} \frac{1}{(\mathsf{n_{src}} - \mathsf{n_{tgt}}) \sigma_i^2}
The callback function ``callback_mix`` takes as argument an nd-array
``premix_signals`` of shape ``(n_src, n_mics, n_samples)`` that contains the
microphone signals prior to mixing. The signal propagated from the ``k``-th
source to the ``m``-th microphone is contained in ``premix_signals[k,m,:]``. It
is possible to provide optional arguments to the callback via
``callback_mix_kwargs`` optional argument. Here is the code
implementing the example described.
.. code-block:: python
# the extra arguments are given in a dictionary
callback_mix_kwargs = {
'snr' : 30, # SNR target is 30 decibels
'sir' : 10, # SIR target is 10 decibels
'n_src' : 6,
'n_tgt' : 2,
'ref_mic' : 0,
}
def callback_mix(premix, snr=0, sir=0, ref_mic=0, n_src=None, n_tgt=None):
# first normalize all separate recording to have unit power at microphone one
p_mic_ref = np.std(premix[:,ref_mic,:], axis=1)
premix /= p_mic_ref[:,None,None]
# now compute the power of interference signal needed to achieve desired SIR
sigma_i = np.sqrt(10 ** (- sir / 10) / (n_src - n_tgt))
premix[n_tgt:n_src,:,:] *= sigma_i
# compute noise variance
sigma_n = np.sqrt(10 ** (- snr / 10))
# Mix down the recorded signals
mix = np.sum(premix[:n_src,:], axis=0) + sigma_n * np.random.randn(*premix.shape[1:])
return mix
# Run the simulation
room.simulate(
callback_mix=callback_mix,
callback_mix_kwargs=callback_mix_kwargs,
)
mics_signals = room.mic_array.signals
In addition, it is desirable in some cases to obtain the microphone signals
with individual sources, prior to mixing. For example, this is useful to
evaluate the output from blind source separation algorithms. In this case, the
``return_premix`` argument should be set to ``True``
.. code-block:: python
premix = room.simulate(return_premix=True)
Reverberation Time
------------------
The reverberation time (RT60) is defined as the time needed for the energy of
the RIR to decrease by 60 dB. It is a useful measure of the amount of
reverberation. We provide a method in the
:py:func:`~pyroomacoustics.experimental.rt60.measure_rt60` to measure the RT60
of recorded or simulated RIR.
The method is also directly integrated in the :py:obj:`~pyroomacoustics.room.Room` object as the method :py:func:`~pyroomacoustics.room.Room.measure_rt60`.
.. code-block:: python
# assuming the simulation has already been carried out
rt60 = room.measure_rt60()
    for m in range(room.n_mics):
        for s in range(room.n_sources):
print(
"RT60 between the {}th mic and {}th source: {:.3f} s".format(m, s, rt60[m, s])
)
References
----------
.. [1] J. B. Allen and D. A. Berkley, *Image method for efficiently simulating small-room acoustics,* J. Acoust. Soc. Am., vol. 65, no. 4, p. 943, 1979.
.. [2] M. Vorlaender, Auralization, 1st ed. Berlin: Springer-Verlag, 2008, pp. 1-340.
.. [3] D. Schroeder, Physically based real-time auralization of interactive virtual environments. PhD Thesis, RWTH Aachen University, 2011.
"""
from __future__ import division, print_function
import math
import warnings
import numpy as np
import scipy.spatial as spatial
from scipy.interpolate import interp1d
from . import beamforming as bf
from . import libroom
from .acoustics import OctaveBandsFactory, rt60_eyring, rt60_sabine
from .beamforming import MicrophoneArray
from .doa import GridCircle, GridSphere
from .experimental import measure_rt60
from .libroom import Wall, Wall2D
from .parameters import Material, Physics, constants, eps, make_materials
from .soundsource import SoundSource
from .utilities import fractional_delay
def wall_factory(corners, absorption, scattering, name=""):
""" Call the correct method according to wall dimension """
if corners.shape[0] == 3:
return Wall(corners, absorption, scattering, name,)
elif corners.shape[0] == 2:
return Wall2D(corners, absorption, scattering, name,)
else:
raise ValueError("Rooms can only be 2D or 3D")
def sequence_generation(volume, duration, c, fs, max_rate=10000):
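    """
    Generates a sparse sequence of +/-1 impulses whose arrival times follow a
    non-homogeneous Poisson process with a rate growing quadratically in time
    (capped at ``max_rate``), sampled at sampling frequency ``fs``.
    """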
# repeated constant
fpcv = 4 * np.pi * c ** 3 / volume
# initial time
t0 = ((2 * np.log(2)) / fpcv) ** (1.0 / 3.0)
times = [t0]
while times[-1] < t0 + duration:
# uniform random variable
z = np.random.rand()
# rate of the point process at this time
mu = np.minimum(fpcv * (t0 + times[-1]) ** 2, max_rate)
# time interval to next point
dt = np.log(1 / z) / mu
times.append(times[-1] + dt)
# convert from continuous to discrete time
    indices = (np.array(times) * fs).astype(int)
seq = np.zeros(indices[-1] + 1)
seq[indices] = np.random.choice([1, -1], size=len(indices))
return seq
def find_non_convex_walls(walls):
"""
Finds the walls that are not in the convex hull
Parameters
----------
walls: list of Wall objects
The walls that compose the room
Returns
-------
list of int
        The indices of the walls not in the convex hull
"""
all_corners = []
for wall in walls[1:]:
all_corners.append(wall.corners.T)
X = np.concatenate(all_corners, axis=0)
convex_hull = spatial.ConvexHull(X, incremental=True)
# Now we need to check which walls are on the surface
# of the hull
in_convex_hull = [False] * len(walls)
for i, wall in enumerate(walls):
# We check if the center of the wall is co-linear or co-planar
# with a face of the convex hull
point = np.mean(wall.corners, axis=1)
for simplex in convex_hull.simplices:
if point.shape[0] == 2:
# check if co-linear
p0 = convex_hull.points[simplex[0]]
p1 = convex_hull.points[simplex[1]]
if libroom.ccw3p(p0, p1, point) == 0:
# co-linear point add to hull
in_convex_hull[i] = True
elif point.shape[0] == 3:
# Check if co-planar
p0 = convex_hull.points[simplex[0]]
p1 = convex_hull.points[simplex[1]]
p2 = convex_hull.points[simplex[2]]
normal = np.cross(p1 - p0, p2 - p0)
if np.abs(np.inner(normal, point - p0)) < eps:
# co-planar point found!
in_convex_hull[i] = True
return [i for i in range(len(walls)) if not in_convex_hull[i]]
class Room(object):
"""
A Room object has as attributes a collection of
:py:obj:`pyroomacoustics.wall.Wall` objects, a
:py:obj:`pyroomacoustics.beamforming.MicrophoneArray` array, and a list of
:py:obj:`pyroomacoustics.soundsource.SoundSource`. The room can be two
dimensional (2D), in which case the walls are simply line segments. A factory method
:py:func:`pyroomacoustics.room.Room.from_corners`
can be used to create the room from a polygon. In three dimensions (3D), the
walls are two dimensional polygons, namely a collection of points lying on a
common plane. Creating rooms in 3D is more tedious and for convenience a method
:py:func:`pyroomacoustics.room.Room.extrude` is provided to lift a 2D room
into 3D space by adding vertical walls and parallel floor and ceiling.
    The Room is sub-classed by :py:obj:`pyroomacoustics.room.ShoeBox` which
creates a rectangular (2D) or parallelepipedic (3D) room. Such rooms
benefit from an efficient algorithm for the image source method.
:attribute walls: (Wall array) list of walls forming the room
:attribute fs: (int) sampling frequency
:attribute max_order: (int) the maximum computed order for images
:attribute sources: (SoundSource array) list of sound sources
:attribute mics: (MicrophoneArray) array of microphones
:attribute corners: (numpy.ndarray 2xN or 3xN, N=number of walls) array containing a point belonging to each wall, used for calculations
:attribute absorption: (numpy.ndarray size N, N=number of walls) array containing the absorption factor for each wall, used for calculations
:attribute dim: (int) dimension of the room (2 or 3 meaning 2D or 3D)
:attribute wallsId: (int dictionary) stores the mapping "wall name -> wall id (in the array walls)"
Parameters
----------
walls: list of Wall or Wall2D objects
The walls forming the room.
fs: int, optional
The sampling frequency in Hz. Default is 8000.
t0: float, optional
The global starting time of the simulation in seconds. Default is 0.
max_order: int, optional
The maximum reflection order in the image source model. Default is 1,
namely direct sound and first order reflections.
sigma2_awgn: float, optional
The variance of the additive white Gaussian noise added during
simulation. By default, none is added.
sources: list of SoundSource objects, optional
Sources to place in the room. Sources can be added after room creating
with the `add_source` method by providing coordinates.
mics: MicrophoneArray object, optional
The microphone array to place in the room. A single microphone or
microphone array can be added after room creation with the
`add_microphone_array` method.
temperature: float, optional
The air temperature in the room in degree Celsius. By default, set so
that speed of sound is 343 m/s.
humidity: float, optional
The relative humidity of the air in the room (between 0 and 100). By
default set to 0.
air_absorption: bool, optional
If set to True, absorption of sound energy by the air will be
simulated.
ray_tracing: bool, optional
If set to True, the ray tracing simulator will be used along with
image source model.
"""
def __init__(
self,
walls,
fs=8000,
t0=0.0,
max_order=1,
sigma2_awgn=None,
sources=None,
mics=None,
temperature=None,
humidity=None,
air_absorption=False,
ray_tracing=False,
):
self.walls = walls
# Get the room dimension from that of the walls
self.dim = walls[0].dim
# Create a mapping with friendly names for walls
self._wall_mapping()
# initialize everything else
self._var_init(
fs,
t0,
max_order,
sigma2_awgn,
temperature,
humidity,
air_absorption,
ray_tracing,
)
# initialize the C++ room engine
self._init_room_engine()
# add the sources
self.sources = []
if sources is not None and isinstance(sources, list):
for src in sources:
self.add_soundsource(src)
# add the microphone array
if mics is not None:
self.add_microphone_array(mics)
else:
self.mic_array = None
def _var_init(
self,
fs,
t0,
max_order,
sigma2_awgn,
temperature,
humidity,
air_absorption,
ray_tracing,
):
self.fs = fs
if t0 != 0.0:
raise NotImplementedError(
"Global simulation delay not " "implemented (aka t0)"
)
self.t0 = t0
self.max_order = max_order
self.sigma2_awgn = sigma2_awgn
self.octave_bands = OctaveBandsFactory(fs=self.fs)
# Keep track of the state of the simulator
self.simulator_state = {
"ism_needed": (self.max_order >= 0),
"rt_needed": ray_tracing,
"air_abs_needed": air_absorption,
"ism_done": False,
"rt_done": False,
"rir_done": False,
}
# make it clear the room (C++) engine is not ready yet
self.room_engine = None
if temperature is None and humidity is None:
# default to package wide setting when nothing is provided
self.physics = Physics().from_speed(constants.get("c"))
else:
# use formulas when temperature and/or humidity are provided
self.physics = Physics(temperature=temperature, humidity=humidity)
self.set_sound_speed(self.physics.get_sound_speed())
self.air_absorption = None
if air_absorption:
self.set_air_absorption()
# default values for ray tracing parameters
self.set_ray_tracing()
if not ray_tracing:
self.unset_ray_tracing()
        # in the beginning, nothing has been simulated yet
self.visibility = None
# initialize the attribute for the impulse responses
self.rir = None
def _init_room_engine(self, *args):
args = list(args)
if len(args) == 0:
# This is a polygonal room
# find the non convex walls
obstructing_walls = find_non_convex_walls(self.walls)
args += [self.walls, obstructing_walls]
# for shoebox rooms, the required arguments are passed to
# the function
# initialize the C++ room engine
args += [
[],
self.c, # speed of sound
self.max_order,
self.rt_args["energy_thres"],
self.rt_args["time_thres"],
self.rt_args["receiver_radius"],
self.rt_args["hist_bin_size"],
self.simulator_state["ism_needed"] and self.simulator_state["rt_needed"],
]
# Create the real room object
if self.dim == 2:
self.room_engine = libroom.Room2D(*args)
else:
self.room_engine = libroom.Room(*args)
def _update_room_engine_params(self):
# Now, if it exists, set the parameters of room engine
if self.room_engine is not None:
self.room_engine.set_params(
self.c, # speed of sound
self.max_order,
self.rt_args["energy_thres"],
self.rt_args["time_thres"],
self.rt_args["receiver_radius"],
self.rt_args["hist_bin_size"],
(
self.simulator_state["ism_needed"]
and self.simulator_state["rt_needed"]
),
)
@property
def is_multi_band(self):
multi_band = False
for w in self.walls:
if len(w.absorption) > 1:
multi_band = True
return multi_band
def set_ray_tracing(
self,
n_rays=None,
receiver_radius=0.5,
energy_thres=1e-7,
time_thres=10.0,
hist_bin_size=0.004,
):
"""
Activates the ray tracer.
Parameters
----------
n_rays: int, optional
The number of rays to shoot in the simulation
receiver_radius: float, optional
The radius of the sphere around the microphone in which to
integrate the energy (default: 0.5 m)
energy_thres: float, optional
            The energy threshold at which rays are stopped (default: 1e-7)
time_thres: float, optional
The maximum time of flight of rays (default: 10 s)
hist_bin_size: float
The time granularity of bins in the energy histogram (default: 4 ms)
"""
self.simulator_state["rt_needed"] = True
self.rt_args = {}
self.rt_args["energy_thres"] = energy_thres
self.rt_args["time_thres"] = time_thres
self.rt_args["receiver_radius"] = receiver_radius
self.rt_args["hist_bin_size"] = hist_bin_size
# set the histogram bin size so that it is an integer number of samples
self.rt_args["hist_bin_size_samples"] = math.floor(
self.fs * self.rt_args["hist_bin_size"]
)
self.rt_args["hist_bin_size"] = self.rt_args["hist_bin_size_samples"] / self.fs
if n_rays is None:
n_rays_auto_flag = True
# We follow Vorlaender 2008, Eq. (11.12) to set the default number of rays
# It depends on the mean hit rate we want to target
target_mean_hit_count = 20
# This is the multiplier for a single hit in average
k1 = self.get_volume() / (
np.pi
* (self.rt_args["receiver_radius"] ** 2)
* self.c
* self.rt_args["hist_bin_size"]
)
n_rays = int(target_mean_hit_count * k1)
if n_rays > 100000:
import warnings
                warnings.warn(
                    "The number of rays used for ray tracing is larger than "
                    "100000 which may result in slow simulation. The number "
                    "of rays was automatically chosen to provide accurate "
                    "room impulse response based on the room volume and the "
                    "receiver radius around the microphones. The number of "
                    "rays may be reduced by increasing the size of the "
                    "receiver. This tends to happen especially for large "
                    "rooms with small receivers. The receiver is a sphere "
                    "around the microphone and its radius (in meters) may be "
                    "specified by providing the `receiver_radius` keyword "
                    "argument to the `set_ray_tracing` method."
                )
self.rt_args["n_rays"] = n_rays
self._update_room_engine_params()
def unset_ray_tracing(self):
""" Deactivates the ray tracer """
self.simulator_state["rt_needed"] = False
self._update_room_engine_params()
def set_air_absorption(self, coefficients=None):
"""
Activates or deactivates air absorption in the simulation.
Parameters
----------
coefficients: list of float
List of air absorption coefficients, one per octave band
"""
self.simulator_state["air_abs_needed"] = True
if coefficients is None:
self.air_absorption = self.octave_bands(**self.physics.get_air_absorption())
else:
# ignore temperature and humidity if coefficients are provided
            self.air_absorption = coefficients
def unset_air_absorption(self):
""" Deactivates air absorption in the simulation """
self.simulator_state["air_abs_needed"] = False
def set_sound_speed(self, c):
""" Sets the speed of sound unconditionnaly """
self.c = c
self._update_room_engine_params()
def _wall_mapping(self):
# mapping between wall names and indices
self.wallsId = {}
for i in range(len(self.walls)):
if self.walls[i].name is not None:
self.wallsId[self.walls[i].name] = i
@classmethod
def from_corners(
cls,
corners,
absorption=None,
fs=8000,
t0=0.0,
max_order=1,
sigma2_awgn=None,
sources=None,
mics=None,
materials=None,
**kwargs
):
"""
Creates a 2D room by giving an array of corners.
Parameters
----------
corners: (np.array dim 2xN, N>2)
            list of corners, must be oriented counter-clockwise (anti-clockwise)
absorption: float array or float
list of absorption factor for each wall or single value
for all walls
Returns
-------
Instance of a 2D room
"""
# make sure the corners are wrapped in an ndarray
corners = np.array(corners)
n_walls = corners.shape[1]
if corners.shape[0] != 2 or n_walls < 3:
raise ValueError("Arg corners must be more than two 2D points.")
# We want to make sure the corners are ordered counter-clockwise
if libroom.area_2d_polygon(corners) <= 0:
corners = corners[:, ::-1]
############################
# BEGIN COMPATIBILITY CODE #
############################
if absorption is None:
absorption = 0.0
absorption_compatibility_request = False
else:
absorption_compatibility_request = True
absorption = np.array(absorption, dtype="float64")
if absorption.ndim == 0:
absorption = absorption * np.ones(n_walls)
elif absorption.ndim >= 1 and n_walls != len(absorption):
raise ValueError(
"Arg absorption must be the same size as corners or must be a single value."
)
        ##########################
        # END COMPATIBILITY CODE #
        ##########################
if materials is not None:
if absorption_compatibility_request:
import warnings
warnings.warn(
"Because materials were specified, deprecated absorption parameter is ignored.",
DeprecationWarning,
)
if not isinstance(materials, list):
materials = [materials] * n_walls
if len(materials) != n_walls:
raise ValueError("One material per wall is necessary.")
for i in range(n_walls):
assert isinstance(
materials[i], Material
), "Material not specified using correct class"
elif absorption_compatibility_request:
import warnings
warnings.warn(
"Using absorption parameter is deprecated. In the future, use materials instead."
)
# Fix the absorption
# 1 - a1 == sqrt(1 - a2) <-- a1 is former incorrect absorption, a2 is the correct definition based on energy
# <=> a2 == 1 - (1 - a1) ** 2
correct_absorption = 1.0 - (1.0 - absorption) ** 2
materials = make_materials(*correct_absorption)
else:
# In this case, no material is provided, use totally reflective walls, no scattering
materials = [Material(0.0, 0.0)] * n_walls
# Resample material properties at octave bands
octave_bands = OctaveBandsFactory(fs=fs)
if not Material.all_flat(materials):
for mat in materials:
mat.resample(octave_bands)
# Create the walls
walls = []
for i in range(n_walls):
walls.append(
wall_factory(
np.array([corners[:, i], corners[:, (i + 1) % n_walls]]).T,
materials[i].absorption_coeffs,
materials[i].scattering_coeffs,
"wall_" + str(i),
)
)
return cls(
walls,
fs=fs,
t0=t0,
max_order=max_order,
sigma2_awgn=sigma2_awgn,
sources=sources,
mics=mics,
**kwargs
)
def extrude(
self, height, v_vec=None, absorption=None, materials=None,
):
"""
Creates a 3D room by extruding a 2D polygon.
        The polygon is typically the floor of the room and will have z-coordinate zero.
        The ceiling is obtained by translating the floor by the extrusion height
        along the extrusion direction ``v_vec``.
Parameters
----------
height : float
The extrusion height
v_vec : array-like 1D length 3, optional
A unit vector. An orientation for the extrusion direction. The
ceiling will be placed as a translation of the floor with respect
to this vector (The default is [0,0,1]).
absorption : float or array-like, optional
Absorption coefficients for all the walls. If a scalar, then all the walls
will have the same absorption. If an array is given, it should have as many elements
as there will be walls, that is the number of vertices of the polygon plus two. The two
last elements are for the floor and the ceiling, respectively.
It is recommended to use materials instead of absorption parameter. (Default: 1)
materials : dict
Absorption coefficients for floor and ceiling. This parameter overrides absorption.
(Default: {"floor": 1, "ceiling": 1})
"""
if self.dim != 2:
raise ValueError("Can only extrude a 2D room.")
# default orientation vector is pointing up
if v_vec is None:
v_vec = np.array([0.0, 0.0, 1.0])
        # check that the walls are ordered counter-clockwise
        # that should be the case if the room was created with the from_corners function
nw = len(self.walls)
floor_corners = np.zeros((2, nw))
floor_corners[:, 0] = self.walls[0].corners[:, 0]
ordered = True
for iw, wall in enumerate(self.walls[1:]):
if not np.allclose(self.walls[iw].corners[:, 1], wall.corners[:, 0]):
ordered = False
floor_corners[:, iw + 1] = wall.corners[:, 0]
if not np.allclose(self.walls[-1].corners[:, 1], self.walls[0].corners[:, 0]):
ordered = False
if not ordered:
raise ValueError(
"The wall list should be ordered counter-clockwise, which is the case \
if the room is created with Room.from_corners"
)
# make sure the floor_corners are ordered anti-clockwise (for now)
if libroom.area_2d_polygon(floor_corners) <= 0:
floor_corners = np.fliplr(floor_corners)
walls = []
for i in range(nw):
corners = np.array(
[
np.r_[floor_corners[:, i], 0],
np.r_[floor_corners[:, (i + 1) % nw], 0],
np.r_[floor_corners[:, (i + 1) % nw], 0] + height * v_vec,
np.r_[floor_corners[:, i], 0] + height * v_vec,
]
).T
walls.append(
wall_factory(
corners,
self.walls[i].absorption,
self.walls[i].scatter,
name=str(i),
)
)
############################
# BEGIN COMPATIBILITY CODE #
############################
        if absorption is None:
            absorption = 0.0
            absorption_compatibility_request = False
        else:
            absorption_compatibility_request = True
##########################
# END COMPATIBILITY CODE #
##########################
if materials is not None:
if absorption_compatibility_request:
import warnings
warnings.warn(
"Because materials were specified, "
"deprecated absorption parameter is ignored.",
DeprecationWarning,
)
if not isinstance(materials, dict):
materials = {"floor": materials, "ceiling": materials}
for mat in materials.values():
assert isinstance(
mat, Material
), "Material not specified using correct class"
elif absorption_compatibility_request:
import warnings
warnings.warn(
"absorption parameter is deprecated for Room.extrude",
DeprecationWarning,
)
absorption = np.array(absorption)
if absorption.ndim == 0:
absorption = absorption * np.ones(2)
elif absorption.ndim == 1 and absorption.shape[0] != 2:
raise ValueError(
"The size of the absorption array must be 2 for extrude, "
"for the floor and ceiling"
)
materials = make_materials(
                floor=(absorption[0], 0.0), ceiling=(absorption[1], 0.0),
)
else:
# In this case, no material is provided, use totally reflective walls, no scattering
new_mat = Material(0.0, 0.0)
materials = {"floor": new_mat, "ceiling": new_mat}
new_corners = {}
new_corners["floor"] = np.pad(floor_corners, ((0, 1), (0, 0)), mode="constant")
new_corners["ceiling"] = (new_corners["floor"].T + height * v_vec).T
        # we need the floor corners to be ordered clockwise (for the normal to point outward)
new_corners["floor"] = np.fliplr(new_corners["floor"])
for key in ["floor", "ceiling"]:
walls.append(
wall_factory(
new_corners[key],
materials[key].absorption_coeffs,
materials[key].scattering_coeffs,
name=key,
)
)
self.walls = walls
self.dim = 3
# Update the real room object
self._init_room_engine()
def plot(
self,
img_order=None,
freq=None,
figsize=None,
no_axis=False,
mic_marker_size=10,
**kwargs
):
""" Plots the room with its walls, microphones, sources and images """
try:
import matplotlib
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn("Matplotlib is required for plotting")
return
if self.dim == 2:
fig = plt.figure(figsize=figsize)
if no_axis is True:
ax = fig.add_axes([0, 0, 1, 1], aspect="equal", **kwargs)
ax.axis("off")
rect = fig.patch
rect.set_facecolor("gray")
rect.set_alpha(0.15)
else:
ax = fig.add_subplot(111, aspect="equal", **kwargs)
# draw room
corners = np.array([wall.corners[:, 0] for wall in self.walls]).T
polygons = [Polygon(corners.T, True)]
p = PatchCollection(
polygons,
cmap=matplotlib.cm.jet,
facecolor=np.array([1, 1, 1]),
edgecolor=np.array([0, 0, 0]),
)
ax.add_collection(p)
# draw the microphones
if self.mic_array is not None:
for mic in self.mic_array.R.T:
ax.scatter(
mic[0],
mic[1],
marker="x",
linewidth=0.5,
s=mic_marker_size,
c="k",
)
# draw the beam pattern of the beamformer if requested (and available)
if (
freq is not None
and isinstance(self.mic_array, bf.Beamformer)
and (
self.mic_array.weights is not None
or self.mic_array.filters is not None
)
):
freq = np.array(freq)
if freq.ndim == 0:
freq = np.array([freq])
# define a new set of colors for the beam patterns
newmap = plt.get_cmap("autumn")
desat = 0.7
try:
# this is for matplotlib >= 2.0.0
ax.set_prop_cycle(
color=[
newmap(k) for k in desat * np.linspace(0, 1, len(freq))
]
)
except:
# keep this for backward compatibility
ax.set_color_cycle(
[newmap(k) for k in desat * np.linspace(0, 1, len(freq))]
)
phis = np.arange(360) * 2 * np.pi / 360.0
newfreq = np.zeros(freq.shape)
H = np.zeros((len(freq), len(phis)), dtype=complex)
for i, f in enumerate(freq):
newfreq[i], H[i] = self.mic_array.response(phis, f)
# normalize max amplitude to one
H = np.abs(H) ** 2 / np.abs(H).max() ** 2
# a normalization factor according to room size
norm = np.linalg.norm(
(corners - self.mic_array.center), axis=0
).max()
# plot all the beam patterns
i = 0
for f, h in zip(newfreq, H):
x = np.cos(phis) * h * norm + self.mic_array.center[0, 0]
y = np.sin(phis) * h * norm + self.mic_array.center[1, 0]
ax.plot(x, y, "-", linewidth=0.5)
# define some markers for different sources and colormap for damping
markers = ["o", "s", "v", "."]
cmap = plt.get_cmap("YlGnBu")
# use this to check some image sources were drawn
has_drawn_img = False
# draw the scatter of images
for i, source in enumerate(self.sources):
# draw source
ax.scatter(
source.position[0],
source.position[1],
c=[cmap(1.0)],
s=20,
marker=markers[i % len(markers)],
edgecolor=cmap(1.0),
)
# draw images
if img_order is None:
img_order = 0
elif img_order == "max":
img_order = self.max_order
I = source.orders <= img_order
if len(I) > 0:
has_drawn_img = True
val = (np.log2(np.mean(source.damping, axis=0)[I]) + 10.0) / 10.0
# plot the images
ax.scatter(
source.images[0, I],
source.images[1, I],
c=cmap(val),
s=20,
marker=markers[i % len(markers)],
edgecolor=cmap(val),
)
# When no image source has been drawn, we need to use the bounding box
# to set correctly the limits of the plot
if not has_drawn_img or img_order == 0:
bbox = self.get_bbox()
ax.set_xlim(bbox[0, :])
ax.set_ylim(bbox[1, :])
return fig, ax
if self.dim == 3:
import mpl_toolkits.mplot3d as a3
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import scipy as sp
fig = plt.figure(figsize=figsize)
ax = a3.Axes3D(fig)
# plot the walls
for w in self.walls:
tri = a3.art3d.Poly3DCollection([w.corners.T], alpha=0.5)
                tri.set_color(colors.rgb2hex(np.random.rand(3)))
tri.set_edgecolor("k")
ax.add_collection3d(tri)
# define some markers for different sources and colormap for damping
markers = ["o", "s", "v", "."]
cmap = plt.get_cmap("YlGnBu")
# use this to check some image sources were drawn
has_drawn_img = False
# draw the scatter of images
for i, source in enumerate(self.sources):
# draw source
ax.scatter(
source.position[0],
source.position[1],
source.position[2],
c=[cmap(1.0)],
s=20,
marker=markers[i % len(markers)],
edgecolor=cmap(1.0),
)
# draw images
if img_order is None:
img_order = self.max_order
I = source.orders <= img_order
if len(I) > 0:
has_drawn_img = True
val = (np.log2(np.mean(source.damping, axis=0)[I]) + 10.0) / 10.0
# plot the images
ax.scatter(
source.images[0, I],
source.images[1, I],
source.images[2, I],
c=cmap(val),
s=20,
marker=markers[i % len(markers)],
edgecolor=cmap(val),
)
# When no image source has been drawn, we need to use the bounding box
# to set correctly the limits of the plot
if not has_drawn_img or img_order == 0:
bbox = self.get_bbox()
ax.set_xlim3d(bbox[0, :])
ax.set_ylim3d(bbox[1, :])
ax.set_zlim3d(bbox[2, :])
# draw the microphones
if self.mic_array is not None:
for mic in self.mic_array.R.T:
ax.scatter(
mic[0],
mic[1],
mic[2],
marker="x",
linewidth=0.5,
s=mic_marker_size,
c="k",
)
return fig, ax
def plot_rir(self, select=None, FD=False):
"""
Plot room impulse responses. Compute if not done already.
Parameters
----------
select: list of tuples OR int
List of RIR pairs `(mic, src)` to plot, e.g. `[(0,0), (0,1)]`. Or
`int` to plot RIR from particular microphone to all sources. Note
that microphones and sources are zero-indexed. Default is to plot
all microphone-source pairs.
FD: bool
Whether to plot in the frequency domain, namely the transfer
function. Default is False.
"""
n_src = len(self.sources)
n_mic = self.mic_array.M
if select is None:
pairs = [(r, s) for r in range(n_mic) for s in range(n_src)]
elif isinstance(select, int):
pairs = [(select, s) for s in range(n_src)]
elif isinstance(select, list):
pairs = select
else:
raise ValueError('Invalid type for "select".')
if not self.simulator_state["rir_done"]:
self.compute_rir()
# for plotting
n_mic = len(list(set(pair[0] for pair in pairs)))
n_src = len(list(set(pair[1] for pair in pairs)))
r_plot = dict()
s_plot = dict()
for k, r in enumerate(list(set(pair[0] for pair in pairs))):
r_plot[r] = k
for k, s in enumerate(list(set(pair[1] for pair in pairs))):
s_plot[s] = k
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn("Matplotlib is required for plotting")
return
from . import utilities as u
for k, _pair in enumerate(pairs):
r = _pair[0]
s = _pair[1]
h = self.rir[r][s]
if select is None: # matrix plot
plt.subplot(n_mic, n_src, r_plot[r] * n_src + s_plot[s] + 1)
else: # one column
plt.subplot(len(pairs), 1, k + 1)
if not FD:
plt.plot(np.arange(len(h)) / float(self.fs), h)
else:
u.real_spectrum(h)
plt.title("RIR: mic" + str(r) + " source" + str(s))
if r == n_mic - 1:
if not FD:
plt.xlabel("Time [s]")
else:
plt.xlabel("Normalized frequency")
plt.tight_layout()
def add(self, obj):
"""
Adds a sound source or microphone to a room
Parameters
----------
obj: :py:obj:`~pyroomacoustics.soundsource.SoundSource` or :py:obj:`~pyroomacoustics.beamforming.Microphone` object
The object to add
Returns
-------
:py:obj:`~pyroomacoustics.room.Room`
The room is returned for further tweaking.
"""
if isinstance(obj, SoundSource):
if obj.dim != self.dim:
raise ValueError(
(
"The Room and SoundSource objects must be of the same "
"dimensionality. The Room is {}D but the SoundSource "
"is {}D"
).format(self.dim, obj.dim)
)
if not self.is_inside(np.array(obj.position)):
raise ValueError("The source must be added inside the room.")
self.sources.append(obj)
elif isinstance(obj, MicrophoneArray):
if obj.dim != self.dim:
raise ValueError(
(
"The Room and MicrophoneArray objects must be of the same "
"dimensionality. The Room is {}D but the SoundSource "
"is {}D"
).format(self.dim, obj.dim)
)
if "mic_array" not in self.__dict__ or self.mic_array is None:
self.mic_array = obj
else:
self.mic_array.append(obj)
# the microphones need to be added to the room_engine
for m in range(len(obj)):
self.room_engine.add_mic(obj.R[:, None, m])
else:
raise TypeError(
"The add method from Room only takes SoundSource or "
"MicrophoneArray objects as parameter"
)
return self
def add_microphone(self, loc, fs=None):
"""
Adds a single microphone in the room.
Parameters
----------
loc: array_like or ndarray
The location of the microphone. The length should be the same as the room dimension.
fs: float, optional
The sampling frequency of the microphone, if different from that of the room.
Returns
-------
:py:obj:`~pyroomacoustics.room.Room`
The room is returned for further tweaking.
"""
# make sure this is a numpy array
loc = np.array(loc)
# if array, make it a 2D array as expected
if loc.ndim == 1:
loc = loc[:, None]
if fs is None:
fs = self.fs
return self.add(MicrophoneArray(loc, fs))
def add_microphone_array(self, mic_array):
"""
Adds a microphone array (i.e. several microphones) in the room.
Parameters
----------
mic_array: array_like or ndarray or MicrophoneArray object
The array can be provided as an array of size ``(dim, n_mics)``,
where ``dim`` is the dimension of the room and ``n_mics`` is the
number of microphones in the array.
As an alternative, a
:py:obj:`~pyroomacoustics.beamforming.MicrophoneArray` can be
provided.
Returns
-------
:py:obj:`~pyroomacoustics.room.Room`
The room is returned for further tweaking.
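Examples
--------
A short sketch of the raw-array form (coordinates are illustrative and
assume a 2-D room already exists as ``room``):
>>> import numpy as np
>>> mics = np.c_[[2.0, 3.0], [2.1, 3.0]]  # shape (dim, n_mics) = (2, 2)
>>> room.add_microphone_array(mics)  # doctest: +SKIP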
"""
if not isinstance(mic_array, MicrophoneArray):
# if the type is not a microphone array, try to parse a numpy array
mic_array = MicrophoneArray(mic_array, self.fs)
return self.add(mic_array)
def add_source(self, position, signal=None, delay=0):
"""
Adds a sound source given by its position in the room. Optionally
a source signal and a delay can be provided.
Parameters
-----------
position: ndarray, shape: (2,) or (3,)
The location of the source in the room
signal: ndarray, shape: (n_samples,), optional
The signal played by the source
delay: float, optional
A time delay until the source signal starts
in the simulation
Returns
-------
:py:obj:`~pyroomacoustics.room.Room`
The room is returned for further tweaking.
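Examples
--------
A hedged sketch; the positions, signal length and delay below are
illustrative only and assume an existing ``room``:
>>> import numpy as np
>>> signal = np.random.randn(16000)
>>> room.add_source([1.5, 1.2], signal=signal, delay=0.5)  # doctest: +SKIP
Since the room itself is returned, several ``add_*`` calls can be chained.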
"""
if isinstance(position, SoundSource):
return self.add(position)
else:
return self.add(SoundSource(position, signal=signal, delay=delay))
def add_soundsource(self, sndsrc):
"""
Adds a :py:obj:`pyroomacoustics.soundsource.SoundSource` object to the room.
Parameters
----------
sndsrc: :py:obj:`~pyroomacoustics.soundsource.SoundSource` object
The SoundSource object to add to the room
"""
return self.add(sndsrc)
def image_source_model(self):
if not self.simulator_state["ism_needed"]:
return
self.visibility = []
for source in self.sources:
n_sources = self.room_engine.image_source_model(source.position)
if n_sources > 0:
# Copy to python managed memory
source.images = self.room_engine.sources.copy()
source.orders = self.room_engine.orders.copy()
source.walls = self.room_engine.gen_walls.copy()
source.damping = self.room_engine.attenuations.copy()
source.generators = -np.ones(source.walls.shape)
self.visibility.append(self.room_engine.visible_mics.copy())
# We need to check that microphones are indeed in the room
for m in range(self.mic_array.R.shape[1]):
# if not, it's not visible from anywhere!
if not self.is_inside(self.mic_array.R[:, m]):
self.visibility[-1][m, :] = 0
# Update the state
self.simulator_state["ism_done"] = True
def ray_tracing(self):
if not self.simulator_state["rt_needed"]:
return
# this will be a list of lists with
# shape (n_mics, n_src, n_directions, n_bands, n_time_bins)
self.rt_histograms = [[] for r in range(self.mic_array.M)]
for s, src in enumerate(self.sources):
self.room_engine.ray_tracing(self.rt_args["n_rays"], src.position)
for r in range(self.mic_array.M):
self.rt_histograms[r].append([])
for h in self.room_engine.microphones[r].histograms:
# get a copy of the histogram
self.rt_histograms[r][s].append(h.get_hist())
# reset all the receivers' histograms
self.room_engine.reset_mics()
# update the state
self.simulator_state["rt_done"] = True
def compute_rir(self):
"""
Compute the room impulse response between every source and microphone.
"""
if self.simulator_state["ism_needed"] and not self.simulator_state["ism_done"]:
self.image_source_model()
if self.simulator_state["rt_needed"] and not self.simulator_state["rt_done"]:
self.ray_tracing()
self.rir = []
volume_room = self.get_volume()
for m, mic in enumerate(self.mic_array.R.T):
self.rir.append([])
for s, src in enumerate(self.sources):
"""
Compute the room impulse response between the source
and the microphone whose position is given as an
argument.
"""
# fractional delay length
fdl = constants.get("frac_delay_length")
fdl2 = fdl // 2
# default, just in case both ism and rt are disabled (should never happen)
N = fdl
if self.simulator_state["ism_needed"]:
# compute the distance from image sources
dist = np.sqrt(np.sum((src.images - mic[:, None]) ** 2, axis=0))
time = dist / self.c
t_max = time.max()
N = int(math.ceil(t_max * self.fs))
else:
t_max = 0.0
if self.simulator_state["rt_needed"]:
# get the maximum length from the histograms
nz_bins_loc = np.nonzero(self.rt_histograms[m][s][0].sum(axis=0))[0]
if len(nz_bins_loc) == 0:
n_bins = 0
else:
n_bins = nz_bins_loc[-1] + 1
t_max = np.maximum(t_max, n_bins * self.rt_args["hist_bin_size"])
# the number of samples needed
# round up to multiple of the histogram bin size
# add the lengths of the fractional delay filter
hbss = int(self.rt_args["hist_bin_size_samples"])
N = int(math.ceil(t_max * self.fs / hbss) * hbss)
# this is where we will compose the RIR
ir = np.zeros(N + fdl)
# This is the distance travelled wrt time
distance_rir = np.arange(N) / self.fs * self.c
# this is the random sequence for the tail generation
seq = sequence_generation(volume_room, N / self.fs, self.c, self.fs)
seq = seq[:N]
# Do band-wise RIR construction
is_multi_band = self.is_multi_band
bws = self.octave_bands.get_bw() if is_multi_band else [self.fs / 2]
rir_bands = []
for b, bw in enumerate(bws):
ir_loc = np.zeros_like(ir)
# IS method
if self.simulator_state["ism_needed"]:
alpha = src.damping[b, :] / (dist)
# Use the Cython extension for the fractional delays
from .build_rir import fast_rir_builder
vis = self.visibility[s][m, :].astype(np.int32)
# we add the delay due to the fractional delay filter to
# the arrival times to avoid problems when the propagation
# delay is shorter than the delay due to the filter
# hence: time + fdl2
time_adjust = time + fdl2 / self.fs
fast_rir_builder(ir_loc, time_adjust, alpha, vis, self.fs, fdl)
if is_multi_band:
ir_loc = self.octave_bands.analysis(ir_loc, band=b)
ir += ir_loc
# Ray Tracing
if self.simulator_state["rt_needed"]:
if is_multi_band:
seq_bp = self.octave_bands.analysis(seq, band=b)
else:
seq_bp = seq.copy()
# interpolate the histogram and multiply the sequence
seq_bp_rot = seq_bp.reshape((-1, hbss))
new_n_bins = seq_bp_rot.shape[0]
hist = self.rt_histograms[m][s][0][b, :new_n_bins]
normalization = np.linalg.norm(seq_bp_rot, axis=1)
indices = normalization > 0.0
seq_bp_rot[indices, :] /= normalization[indices, None]
seq_bp_rot *= np.sqrt(hist[:, None])
# Normalize the band power
# The bands should normally sum up to fs / 2
seq_bp *= np.sqrt(bw / self.fs * 2.0)
ir_loc[fdl2 : fdl2 + N] += seq_bp
# keep for further processing
rir_bands.append(ir_loc)
# Do Air absorption
if self.simulator_state["air_abs_needed"]:
# In case this was not multi-band, do the band pass filtering
if len(rir_bands) == 1:
rir_bands = self.octave_bands.analysis(rir_bands[0]).T
# Now apply air absorption
for band, air_abs in zip(rir_bands, self.air_absorption):
air_decay = np.exp(-0.5 * air_abs * distance_rir)
band[fdl2 : N + fdl2] *= air_decay
# Sum up all the bands
np.sum(rir_bands, axis=0, out=ir)
self.rir[-1].append(ir)
self.simulator_state["rir_done"] = True
def simulate(
self,
snr=None,
reference_mic=0,
callback_mix=None,
callback_mix_kwargs={},
return_premix=False,
recompute_rir=False,
):
r"""
Simulates the microphone signal at every microphone in the array
Parameters
----------
reference_mic: int, optional
The index of the reference microphone to use for SNR computations.
The default reference microphone is the first one (index 0)
snr: float, optional
The target signal-to-noise ratio (SNR) in decibels at the reference microphone.
When this option is used the argument
:py:attr:`pyroomacoustics.room.Room.sigma2_awgn` is ignored. The variance of
every source at the reference microphone is normalized to one and
the variance of the noise \\(\\sigma_n^2\\) is chosen
.. math::
\mathsf{SNR} = 10 \log_{10} \frac{ K }{ \sigma_n^2 }
The value of :py:attr:`pyroomacoustics.room.Room.sigma2_awgn` is also set
to \\(\\sigma_n^2\\) automatically
callback_mix: func, optional
A function that will perform the mix, it takes as first argument
an array of shape ``(n_sources, n_mics, n_samples)`` that contains
the source signals convolved with the room impulse response prior
to mixture at the microphone. It should return an array of shape
``(n_mics, n_samples)`` containing the mixed microphone signals.
If such a function is provided, the ``snr`` option is ignored
and :py:attr:`pyroomacoustics.room.Room.sigma2_awgn` is set to ``None``.
callback_mix_kwargs: dict, optional
A dictionary that contains optional arguments for ``callback_mix``
function
return_premix: bool, optional
If set to ``True``, the function will return an array of shape
``(n_sources, n_mics, n_samples)`` containing the microphone
signals with individual sources, convolved with the room impulse
response but prior to mixing
recompute_rir: bool, optional
If set to ``True``, the room impulse responses will be recomputed
prior to simulation
Returns
-------
Nothing or an array of shape ``(n_sources, n_mics, n_samples)``
Depends on the value of ``return_premix`` option
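Examples
--------
A minimal sketch, assuming the room already contains at least one source
with a signal and a microphone (argument values are illustrative):
>>> room.simulate(snr=10.0)  # doctest: +SKIP
>>> premix = room.simulate(return_premix=True)  # doctest: +SKIP
>>> premix.shape  # (n_sources, n_mics, n_samples)  # doctest: +SKIP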
"""
# import convolution routine
from scipy.signal import fftconvolve
# Throw an error if we are missing some hardware in the room
if len(self.sources) == 0:
raise ValueError("There are no sound sources in the room.")
if self.mic_array is None:
raise ValueError("There is no microphone in the room.")
# compute RIR if necessary
if self.rir is None or len(self.rir) == 0 or recompute_rir:
self.compute_rir()
# number of mics and sources
M = self.mic_array.M
S = len(self.sources)
# compute the maximum signal length
from itertools import product
max_len_rir = np.array(
[len(self.rir[i][j]) for i, j in product(range(M), range(S))]
).max()
f = lambda i: len(self.sources[i].signal) + np.floor(
self.sources[i].delay * self.fs
)
max_sig_len = np.array([f(i) for i in range(S)]).max()
L = int(max_len_rir) + int(max_sig_len) - 1
if L % 2 == 1:
L += 1
# the array that will receive all the signals
premix_signals = np.zeros((S, M, L))
# compute the signal at every microphone in the array
for m in np.arange(M):
for s in np.arange(S):
sig = self.sources[s].signal
if sig is None:
continue
d = int(np.floor(self.sources[s].delay * self.fs))
h = self.rir[m][s]
premix_signals[s, m, d : d + len(sig) + len(h) - 1] += fftconvolve(
h, sig
)
if callback_mix is not None:
# Execute user provided callback
signals = callback_mix(premix_signals, **callback_mix_kwargs)
self.sigma2_awgn = None
elif snr is not None:
# Normalize all signals so that each source has unit variance at the reference microphone
denom = np.std(premix_signals[:, reference_mic, :], axis=1)
premix_signals /= denom[:, None, None]
signals = np.sum(premix_signals, axis=0)
# Compute the variance of the microphone noise
self.sigma2_awgn = 10 ** (-snr / 10) * S
else:
signals = np.sum(premix_signals, axis=0)
# add white gaussian noise if necessary
if self.sigma2_awgn is not None:
signals += np.random.normal(0.0, np.sqrt(self.sigma2_awgn), signals.shape)
# record the signals in the microphones
self.mic_array.record(signals, self.fs)
if return_premix:
return premix_signals
def direct_snr(self, x, source=0):
""" Computes the direct Signal-to-Noise Ratio """
if source >= len(self.sources):
raise ValueError("No such source")
if self.sources[source].signal is None:
raise ValueError("No signal defined for source " + str(source))
if self.sigma2_awgn is None:
return float("inf")
x = np.array(x)
sigma2_s = np.mean(self.sources[0].signal ** 2)
d2 = np.sum((x - self.sources[source].position) ** 2)
return sigma2_s / self.sigma2_awgn / (16 * np.pi ** 2 * d2)
def get_wall_by_name(self, name):
"""
Returns the instance of the wall by giving its name.
:arg name: (string) name of the wall
:returns: (Wall) instance of the wall with this name
"""
if name in self.wallsId:
return self.walls[self.wallsId[name]]
else:
raise ValueError("The wall " + name + " cannot be found.")
def get_bbox(self):
""" Returns a bounding box for the room """
lower = np.amin(np.concatenate([w.corners for w in self.walls], axis=1), axis=1)
upper = np.amax(np.concatenate([w.corners for w in self.walls], axis=1), axis=1)
return np.c_[lower, upper]
def is_inside(self, p, include_borders=True):
"""
Checks if the given point is inside the room.
Parameters
----------
p: array_like, length 2 or 3
point to be tested
include_borders: bool, optional
set true if a point on the wall must be considered inside the room
Returns
-------
True if the given point is inside the room, False otherwise.
"""
p = np.array(p)
if self.dim != p.shape[0]:
raise ValueError("Dimension of room and p must match.")
# The method works as follows: we pick a reference point *outside* the room and
# draw a line between the point to check and the reference.
# If the point to check is inside the room, the line will intersect an odd
# number of walls. If it is outside, an even number.
# Unfortunately, there are a lot of corner cases when the line intersects
# precisely on a corner of the room for example, or is aligned with a wall.
# To avoid all these corner cases, we will do a randomized test.
# We will pick a point at random outside the room so that the probability
# that a corner case happens is virtually zero. If the test raises a corner
# case, we will repeat the test with a different reference point.
# get the bounding box
bbox = self.get_bbox()
bbox_center = np.mean(bbox, axis=1)
bbox_max_dist = np.linalg.norm(bbox[:, 1] - bbox[:, 0]) / 2
# re-run until we get a non-ambiguous result
it = 0
while it < constants.get("room_isinside_max_iter"):
# Get random point outside the bounding box
random_vec = np.random.randn(self.dim)
random_vec /= np.linalg.norm(random_vec)
p0 = bbox_center + 2 * bbox_max_dist * random_vec
ambiguous = False # be optimistic
is_on_border = False # we have to know if the point is on the boundary
count = 0 # wall intersection counter
for i in range(len(self.walls)):
# intersects, border_of_wall, border_of_segment = self.walls[i].intersects(p0, p)
# ret = self.walls[i].intersects(p0, p)
loc = np.zeros(self.dim, dtype=np.float32)
ret = self.walls[i].intersection(p0, p, loc)
if (
ret == int(Wall.Isect.ENDPT) or ret == 3
): # this flag is True when p is on the wall
is_on_border = True
elif ret == Wall.Isect.BNDRY:
# the intersection is on a corner of the room
# but the point to check itself is *not* on the wall
# then things get tricky
ambiguous = True
# count the wall intersections
if ret >= 0: # valid intersection
count += 1
# start over when ambiguous
if ambiguous:
it += 1
continue
else:
if is_on_border and not include_borders:
return False
elif is_on_border and include_borders:
return True
elif count % 2 == 1:
return True
else:
return False
return False
# We should never reach this
raise ValueError(
"""
Error: could not determine whether the point is inside or outside the room within the maximum number of iterations.
This is most likely a bug, please report it.
"""
)
def wall_area(self, wall):
"""Computes the area of a 3D planar wall.
:param wall: the wall object that is defined in the 3D space"""
# Algo : http://geomalgorithms.com/a01-_area.
# Recall that the wall corners have the following shape :
# [ [x1, x2, ...], [y1, y2, ...], [z1, z2, ...] ]
c = wall.corners
n = wall.normal / np.linalg.norm(wall.normal)
if len(c) != 3:
raise ValueError("The function wall_area3D only supports ")
sum_vect = [0.0, 0.0, 0.0]
num_vertices = len(c[0])
for i in range(num_vertices):
sum_vect = sum_vect + np.cross(c[:, (i - 1) % num_vertices], c[:, i])
return abs(np.dot(n, sum_vect)) / 2.0
def get_volume(self):
"""
Computes the volume of a room
:param room: the room object
:return: the volume in cubic unit
"""
wall_sum = 0.0
for w in self.walls:
n = (w.normal) / np.linalg.norm(w.normal)
one_point = w.corners[:, 0]
wall_sum += np.dot(n, one_point) * w.area()
return wall_sum / 3.0
@property
def volume(self):
return self.get_volume()
@property
def n_mics(self):
return len(self.mic_array) if self.mic_array is not None else 0
@property
def n_sources(self):
return len(self.sources) if self.sources is not None else 0
def rt60_theory(self, formula="sabine"):
"""
Compute the theoretical reverberation time (RT60) for the room.
Parameters
----------
formula: str
The formula to use for the calculation, 'sabine' (default) or 'eyring'
"""
rt60 = 0.0
if self.is_multi_band:
bandwidths = self.octave_bands.get_bw()
else:
bandwidths = [1.0]
V = self.volume
S = np.sum([w.area() for w in self.walls])
c = self.c
for i, bw in enumerate(bandwidths):
# average absorption coefficients
a = 0.0
for w in self.walls:
if len(w.absorption) == 1:
a += w.area() * w.absorption[0]
else:
a += w.area() * w.absorption[i]
a /= S
try:
m = self.air_absorption[i]
except:
m = 0.0
if formula == "eyring":
rt60_loc = rt60_eyring(S, V, a, m, c)
elif formula == "sabine":
rt60_loc = rt60_sabine(S, V, a, m, c)
else:
raise ValueError("Only Eyring and Sabine's formulas are supported")
rt60 += rt60_loc * bw
rt60 /= np.sum(bandwidths)
return rt60
def measure_rt60(self, decay_db=60, plot=False):
"""
Measures the reverberation time (RT60) of the simulated RIR.
Parameters
----------
decay_db: float
This is the actual decay of the RIR used for the computation. The
default is 60, meaning that the RT60 is exactly what we measure.
In some cases, the signal may be too short to measure 60 dB decay.
In this case, we can specify a lower value. For example, with 30
dB, the RT60 is twice the time measured.
plot: bool
Displays a graph of the Schroeder curve and the estimated RT60.
Returns
-------
ndarray (n_mics, n_sources)
An array that contains the measured RT60 for all the RIR.
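Examples
--------
A hedged sketch comparing measured and theoretical values (the room is
assumed to be fully set up beforehand):
>>> room.compute_rir()  # doctest: +SKIP
>>> rt60_measured = room.measure_rt60(decay_db=30)  # doctest: +SKIP
>>> rt60_measured.shape  # (n_mics, n_sources)  # doctest: +SKIP
>>> rt60_predicted = room.rt60_theory(formula="sabine")  # doctest: +SKIP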
"""
rt60 = np.zeros((self.n_mics, self.n_sources))
for m in range(self.n_mics):
for s in range(self.n_sources):
rt60[m, s] = measure_rt60(
self.rir[m][s], fs=self.fs, plot=plot, decay_db=decay_db
)
return rt60
class ShoeBox(Room):
"""
This class provides an API for creating a ShoeBox room in 2D or 3D.
Parameters
----------
p : array
Length 2 (width, length) or 3 (width, length, height) depending on
the desired dimension of the room.
fs: int, optional
The sampling frequency in Hz. Default is 8000.
t0: float, optional
The global starting time of the simulation in seconds. Default is 0.
absorption : float
Average amplitude absorption of walls. Note that this parameter is
deprecated; use `materials` instead!
max_order: int, optional
The maximum reflection order in the image source model. Default is 1,
namely direct sound and first order reflections.
sigma2_awgn: float, optional
The variance of the additive white Gaussian noise added during
simulation. By default, none is added.
sources: list of SoundSource objects, optional
Sources to place in the room. Sources can be added after room creation
with the `add_source` method by providing coordinates.
mics: MicrophoneArray object, optional
The microphone array to place in the room. A single microphone or
microphone array can be added after room creation with the
`add_microphone_array` method.
materials : `Material` object or `dict` of `Material` objects
See `pyroomacoustics.parameters.Material`. If providing a `dict`,
you must provide a `Material` object for each wall: 'east',
'west', 'north', 'south', 'ceiling' (3D), 'floor' (3D).
temperature: float, optional
The air temperature in the room in degree Celsius. By default, set so
that speed of sound is 343 m/s.
humidity: float, optional
The relative humidity of the air in the room (between 0 and 100). By
default set to 0.
air_absorption: bool, optional
If set to True, absorption of sound energy by the air will be
simulated.
ray_tracing: bool, optional
If set to True, the ray tracing simulator will be used along with
image source model.
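Examples
--------
A minimal construction sketch (dimensions, sampling rate and absorption
value are illustrative, not defaults of this class):
>>> import pyroomacoustics as pra
>>> from pyroomacoustics.parameters import Material
>>> mat = Material(energy_absorption=0.2)  # doctest: +SKIP
>>> room = pra.ShoeBox([5.0, 4.0, 3.0], fs=16000, materials=mat, max_order=10)  # doctest: +SKIP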
"""
def __init__(
self,
p,
fs=8000,
t0=0.0,
absorption=None, # deprecated
max_order=1,
sigma2_awgn=None,
sources=None,
mics=None,
materials=None,
temperature=None,
humidity=None,
air_absorption=False,
ray_tracing=False,
):
p = np.array(p, dtype=np.float32)
if len(p.shape) != 1 or len(p) not in (2, 3):
raise ValueError("`p` must be a vector of length 2 or 3.")
self.dim = p.shape[0]
# record shoebox dimension in object
self.shoebox_dim = np.array(p)
# initialize the attributes of the room
self._var_init(
fs,
t0,
max_order,
sigma2_awgn,
temperature,
humidity,
air_absorption,
ray_tracing,
)
# Keep the correctly ordered naming of walls
# This is the correct order for the shoebox computation later
# W/E is for axis x, S/N for y-axis, F/C for z-axis
self.wall_names = ["west", "east", "south", "north"]
if self.dim == 3:
self.wall_names += ["floor", "ceiling"]
n_walls = len(self.wall_names)
############################
# BEGIN COMPATIBILITY CODE #
############################
if absorption is None:
absorption_compatibility_request = False
absorption = 0.0
else:
absorption_compatibility_request = True
# copy over the absorption coefficient
if isinstance(absorption, float):
absorption = dict(zip(self.wall_names, [absorption] * n_walls))
##########################
# END COMPATIBILITY CODE #
##########################
if materials is not None:
if absorption_compatibility_request:
warnings.warn(
"Because `materials` were specified, deprecated "
"`absorption` parameter is ignored.",
DeprecationWarning,
)
if isinstance(materials, Material):
materials = dict(zip(self.wall_names, [materials] * n_walls))
elif not isinstance(materials, dict):
raise ValueError(
"`materials` must be a `Material` object or "
"a `dict` specifying a `Material` object for"
" each wall: 'east', 'west', 'north', "
"'south', 'ceiling' (3D), 'floor' (3D)."
)
for w_name in self.wall_names:
assert isinstance(
materials[w_name], Material
), "Material not specified using correct class"
elif absorption_compatibility_request:
warnings.warn(
"Using absorption parameter is deprecated. Use `materials` with "
"`Material` object instead.",
DeprecationWarning,
)
# order the wall absorptions
if not isinstance(absorption, dict):
raise ValueError(
"`absorption` must be either a scalar or a "
"2x dim dictionary with entries for each "
"wall, namely: 'east', 'west', 'north', "
"'south', 'ceiling' (3d), 'floor' (3d)."
)
materials = {}
for w_name in self.wall_names:
if w_name in absorption:
# Fix the absorption
# 1 - a1 == sqrt(1 - a2) <-- a1 is former incorrect absorption, a2 is the correct definition based on energy
# <=> a2 == 1 - (1 - a1) ** 2
correct_abs = 1.0 - (1.0 - absorption[w_name]) ** 2
materials[w_name] = Material(energy_absorption=correct_abs)
else:
raise KeyError(
"Absorption needs to have keys 'east', 'west', "
"'north', 'south', 'ceiling' (3d), 'floor' (3d)."
)
else:
# In this case, no material is provided, use totally reflective
# walls, no scattering
materials = dict(
zip(self.wall_names, [Material(energy_absorption=0.0)] * n_walls)
)
# If some of the materials used are multi-band, we need to resample
# all of them to have the same number of values
if not Material.all_flat(materials):
for name, mat in materials.items():
mat.resample(self.octave_bands)
# Get the absorption and scattering as arrays
# shape: (n_bands, n_walls)
absorption_array = np.array(
[materials[w].absorption_coeffs for w in self.wall_names]
).T
scattering_array = np.array(
[materials[w].scattering_coeffs for w in self.wall_names]
).T
# Create the real room object
self._init_room_engine(
self.shoebox_dim, absorption_array, scattering_array,
)
self.walls = self.room_engine.walls
Room._wall_mapping(self)
# add the sources
self.sources = []
if sources is not None and isinstance(sources, list):
for src in sources:
self.add_soundsource(src)
# add the microphone array
if mics is not None:
self.add_microphone_array(mics)
else:
self.mic_array = None
def extrude(self, height):
""" Overload the extrude method from 3D rooms """
if height < 0.0:
raise ValueError("Room height must be positive")
Room.extrude(self, np.array([0.0, 0.0, height]))
# update the shoebox dim
self.shoebox_dim = np.append(self.shoebox_dim, height)
def get_volume(self):
"""
Computes the volume of a room
Returns
-------
the volume in cubic unit
"""
return np.prod(self.shoebox_dim)
def is_inside(self, pos):
"""
Parameters
----------
pos: array_like
The position to test in an array of size 2 for a 2D room and 3 for a 3D room
Returns
-------
True if ``pos`` is a point in the room, ``False`` otherwise.
"""
pos = np.array(pos)
return np.all(pos >= 0) and np.all(pos <= self.shoebox_dim)
| mit |
ernestyalumni/Propulsion | Propulsion.py | 1 | 11699 | ## Propulsion.py
## Problems and solutions in Propulsion
######################################################################################
## Copyleft 2015, Ernest Yeung <[email protected]>
## 20151112
## This program, along with all its code, is free software; you can redistribute
## it and/or modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## Governing the ethics of using this program, I default to the Caltech Honor Code:
## ``No member of the Caltech community shall take unfair advantage of
## any other member of the Caltech community.''
##
## Donate, and support my other scientific and engineering endeavors at
## ernestyalumni.tilt.com
######################################################################################
import decimal
from decimal import Decimal
import sympy
from sympy import *
from sympy.abc import a,A, t, u, psi, rho, theta
from sympy import Rational as Rat
from sympy.utilities.lambdify import lambdify, implemented_function
import Physique
from Physique import FCconv, KCconv, FundConst, conv, plnfacts, T_C, T_K, T_F
t_p = Symbol('t_p', real=True) # burn rate
g_0 = Symbol('g_0', positive=True) # standard gravity
F_thrust = Function('F_thrust')(t)
I_t = integrate(F_thrust,(t,0,t_p)) # total impulse
m = Function('m')(t) # mass of propellant flowing out
W_p = Symbol('W_p',positive=True) # weight of propellant
I_sp = I_t/W_p
I_sp.subs( W_p, (g_0*integrate(m.diff(t),(t,0,t_p))) ) # specific impulse
M0 = Symbol('M0',positive=True)
m_p = Symbol('m_p',positive=True)
M = Function('M')(t) # mass of rocket+propellant system
massflow = Symbol('massflow',real=True)
u_e = Symbol('u_e',real=True) # effective exhaust velocity, $c$ for Bibliarz and Sutton
u=Function('u')(t)
M_constantflow = M0 - t*m_p/t_p
# assume constant mass flow
I_t.subs(F_thrust, massflow*u_e).doit()
I_sp.subs(F_thrust,massflow*u_e).subs(W_p, g_0*massflow*t_p).doit() # u_e/g_0
# cf. 4.1 Gravity-Free, Drag-Free Space Flight
# Biblarz, Sutton, Rocket Propulsion Elements (2001)
gravityfreedragfreespaceflight = Eq( u.diff(t), massflow*u_e/M )
gravityfreedragfreespaceflight.subs(M,M_constantflow)
Deltau_g0D0 = integrate( gravityfreedragfreespaceflight.subs(M,M_constantflow).rhs , (t,0,t_p)).simplify() # \Delta u for g_0 = 0, D = 0 (gravity-free, drag-free)
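# Hedged check of the line above: carrying out the integral by hand gives the
# ideal (Tsiolkovsky) rocket equation, Delta u = u_e*ln(M0/(M0 - m_p)), so
# Deltau_g0D0 is expected to be an equivalent SymPy expression; e.g.
# simplify(Deltau_g0D0 - u_e*log(M0/(M0 - m_p))) should reduce to 0
# (given the assumption M0 > m_p > 0).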
# cf. 4.2 Forces acting on a Vehicle in the Atmosphere
# Biblarz, Sutton, Rocket Propulsion Elements (2001)
C_L = Symbol('C_L', positive=True)
C_D = Symbol('C_D', positive=True)
Lift = C_L*(Rat(1)/Rat(2))*rho*A*u**2
Drag = C_D*(Rat(1)/Rat(2))*rho*A*u**2
theta = Function('theta')(t)
flightpathdirection = Eq( u.diff(t), F_thrust/M*cos(psi-theta) - Drag/M - g_0*sin(theta) )
tangentialflightdirection = Eq( u*theta.diff(t) , F_thrust/M*sin(psi-theta)+ Lift/M - g_0*cos(theta) )
# Example 4-1 Biblarz, Sutton, Rocket Propulsion Elements (2001)
# assume constant thrust
F_thrust0 = Symbol('F_thrust0',real=True)
I_sp = Symbol('I_sp',positive=True)
I_spEq = Eq(I_sp, I_t/W_p) # specific impulse equation
# Given
# Launch weight 4.0 lbf
# Useful propellant mass 0.4 lbm
# Effective specific impulse 120 sec
# Launch angle (relative to horizontal 80 degrees
# Burn time (with constant thrust) 1.0 sec
theta_0 = Symbol("theta_0",real=True)
I_spEq.subs(I_t,F_thrust0*t_p) # I_sp == F_thrust0*t_p/W_p
solve( I_spEq.subs(I_t,F_thrust0*t_p).subs(I_sp,120.).subs(t_p,1.0).subs(W_p, 0.4), F_thrust0) # [48.0000000000000] lbf
# "The direction of thrust and the flight path are the same
udot = Matrix([ [flightpathdirection.rhs],[tangentialflightdirection.rhs]])
Rot = Matrix([[ cos(theta), -sin(theta)],[sin(theta),cos(theta)]])
# assume negligible Drag (low velocity), no lift (wingless)
udot.subs(Lift,0).subs(Drag,0).subs(psi,theta)
( Rot * udot.subs(Lift,0).subs(Drag,0).subs(psi,theta)).expand() # This reproduces the acceleration in x and y components of the powered flight stage
( Rot * udot.subs(Lift,0).subs(Drag,0).subs(psi,theta)).expand().subs(F_thrust, 48.0).subs(M,4.0/32.2).subs(g_0,32.0).subs( theta, 80./180.*N(pi) ) # 67.1 ft/sec^2 in x direction, 348.5 ft/sec^2 in y direction
uxuydot = (Rot*udot.subs(Lift,0).subs(Drag,0).subs(psi,theta)).expand().subs(F_thrust,48.0).subs(g_0,32.0).subs(theta,80./180.*N(pi)).subs(M,M_constantflow).subs(M0,4.0/32.2).subs(m_p,0.4/32.2).subs(t_p,1.0)
u_p = integrate(uxuydot,(t,0,1.0) ) # Matrix([
# [70.6944361984026], 70.7 ft/sec
# [368.928070760125]]) 375 ft/sec
# EY : 20151113 Launch weight is given as 4.0 lbf and useful propellant mass as 0.4 lbm, yet Biblarz and Sutton divide both by 32.2 ft/sec^2, putting lbf and lbm on the same footing as the units for initial and final weight on pp. 114; is this wrong? Answer is no: lbm is lbf, just used in different contexts; see this clear explanation: https://youtu.be/4ePaKh9QyC8
atan( u_p[1]/u_p[0])*180/N(pi) # 79.1524086456152
# Problems Ch. 4 Flight Performance pp. 154
# 3.
Problem0403 = flightpathdirection.subs(Drag,0).subs(psi,theta).subs(theta,pi/2).subs(F_thrust, m_p/t_p*u_e).subs(M,M_constantflow).subs(m_p,M0*0.57).subs(t_p,5.).subs(u_e,2209.).subs(g_0,9.8).factor(M0).rhs # Chapter 4 Flight Performance, Problem 3
integrate( Problem0403, (t,0,5.0) ) # 1815.32988528061
integrate( integrate(Problem0403,(t,0,t) ),(t,0,5.0) ) # 3890.37850288891
# Problem 6, Ch. 4 Flight Performance pp. 155
M_earth = plnfacts.loc[plnfacts['Planet']=="EARTH","Mass (1024kg)"].values[0]*10**(24) # in kg
R_earth = plnfacts.loc[plnfacts['Planet']=="EARTH","Diameter (km)"].values[0]/Decimal(2)
Gconst = FundConst[ FundConst["Quantity"].str.contains("gravitation") ].loc[243,"Value"]
v0406 = sqrt( Gconst*M_earth/((R_earth + Decimal(500))*10**3) ) # velocity of satellite v of Chapter 4, Problem 6 of Biblarz and Sutton
T0406 = (2.*N(pi)*float((R_earth + Decimal(500))*10**3 )**(3./2))/float(sqrt( Gconst*M_earth))
Eperm0406 = Gconst*M_earth*(-1/(2*((R_earth+Decimal(500))*10**3)) + 1/(R_earth*10**3)) # Energy per mass
Eperm0406.quantize(Decimal('100000.'))
# cf. https://gist.github.com/jackiekazil/6201722
# cf. http://stackoverflow.com/questions/6913532/display-a-decimal-in-scientific-notation
'%.6E' % Eperm0406
##############################
##### AE 121
##############################
#########################
#### PS 2
#########################
####################
### Problem 1
####################
gstd = FundConst[ FundConst["Quantity"].str.contains("gravity") ].loc[303,:].Value
M_0 = Symbol('M_0',positive=True)
Deltau = -I_sp*g_0*ln( (M_0 -m_p)/M_0)
# part (a)
Deltau.subs(I_sp,268.8).subs(g_0,gstd).subs(M_0,805309.).subs(m_p, (1-0.1396)*586344) # 2595.74521034101 m/s
# part (b)
Deltau.subs(I_sp,452.1).subs(g_0,gstd).subs(M_0,183952+35013.).subs(m_p, (1-0.1110)*183952) # 6090.68716730318 m/s
# part (c)
1.5*805309./268.8 # 4493.911830357143
####################
### Problem 3
####################
import scipy
from scipy import exp, array
from scipy.integrate import ode
import matplotlib.pyplot as plt
M_cannonball = (7.8*(10**2)**3/(10**3))*4./3.*N(pi)*(15./2./100.)**3
(1.225)*(0.1)/(2.*M_cannonball)*(N(pi)*(15./2./100.)**2) # 7.85256410256411e-5
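# Descriptive note on the two lines above: the first builds the cannonball
# mass from an assumed steel density of 7.8 g/cm^3 and a 15 cm diameter; the
# second evaluates rho_air*C_D*A/(2*m) with rho_air = 1.225 kg/m^3 and what
# appears to be a drag coefficient C_D = 0.1, which is the 7.853e-5 factor
# hard-coded inside deriv() below.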
def deriv(t,u): # return derivatives of the array u
"""
cf. http://bulldog2.redlands.edu/facultyfolder/deweerd/tutorials/Tutorial-ODEs.pdf
"""
uxdot = (7.853*10**(-5))*exp( -u[3]/(10000.))*(u[0]**2 + u[1]**2)**(0.5)*(-u[0])
uydot = -9.8 + (7.853*10**(-5))*exp(-u[3]/(10000.))*(u[0]**2 + u[1]**2)**(0.5)*(-u[1])
return array([ uxdot,uydot,u[0],u[1] ])
u0 = [300.*cos(50./180.*N(pi)), 300.*sin(50./180.*N(pi)),0,0]
Prob0203 = ode(deriv).set_integrator('dopri5') # Problem 3 from Problem Set 2 for AE121 Fall 2015
# cf. http://stackoverflow.com/questions/26738676/does-scipy-integrate-ode-set-solout-work
Prob0203.set_initial_value(u0)
t1 = 41.575
dt = 0.005
while Prob0203.successful() and Prob0203.t < t1:
Prob0203.integrate(Prob0203.t+dt)
print(" %g " % Prob0203.t )
print(Prob0203.y)
Prob0203.set_initial_value(u0)
Prob0203_solution = []
while Prob0203.successful() and Prob0203.t < t1:
Prob0203_solution.append( [Prob0203.t+dt,] + list( Prob0203.integrate(Prob0203.t+dt) ) )
# take the transpose of a list of lists
Prob0203_solution = map(list, zip(*Prob0203_solution))
plt.figure(1)
plt.plot( Prob0203_solution[3],Prob0203_solution[4])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.title('Cannonball trajectory with Drag: Variable density')
# part (b)
def deriv_b(t,u): # return derivatives of the array u
"""
cf. http://bulldog2.redlands.edu/facultyfolder/deweerd/tutorials/Tutorial-ODEs.pdf
"""
uxdot = (7.853*10**(-5)) *(u[0]**2 + u[1]**2)**(0.5)*(-u[0])
uydot = -9.8 + (7.853*10**(-5)) *(u[0]**2 + u[1]**2)**(0.5)*(-u[1])
return array([ uxdot,uydot,u[0],u[1] ])
Prob0203b = ode(deriv_b).set_integrator('dopri5')
Prob0203b.set_initial_value(u0)
Prob0203b.integrate(41.23)
t1b = 41.225
Prob0203b.set_initial_value(u0)
Prob0203b_solution = []
while Prob0203b.successful() and Prob0203b.t < t1b:
Prob0203b_solution.append( [Prob0203b.t+dt,] + list( Prob0203b.integrate(Prob0203b.t+dt) ) )
Prob0203b_solution = map(list, zip(*Prob0203b_solution))
plt.figure(2)
plt.plot( Prob0203b_solution[3],Prob0203b_solution[4])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.title('Cannonball trajectory with Drag: Constant density')
# part (c)
300.**2/9.8*sin(2.*50./180.*N(pi) ) # 9044.15283378558
#parabola trajectory data
Prob0203c_x = [i*10 for i in range(905)]
Prob0203c_y = [ tan(50./180.*N(pi))*x - (9.8/2.)*x**2/(300.*cos(50./180.*N(pi)))**2 for x in Prob0203c_x]
plt.figure(3)
plt.plot( Prob0203_solution[3],Prob0203_solution[4], label="Drag: Variable density")
plt.plot( Prob0203b_solution[3],Prob0203b_solution[4], label="Drag: Constant density")
plt.plot( Prob0203c_x,Prob0203c_y, label="No Drag")
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.title('Trajectories of cannonball with Drag of variable density, Drag of constant density, and no drag')
plt.legend()
#########################
#### PS 4
#########################
####################
### Problem 1
####################
# (b)
k_Boltz = FundConst[ FundConst["Quantity"].str.contains("Boltzmann") ].loc[49,:]
k_Boltz.Value
k_Boltz.Unit
N_Avog = FundConst[FundConst["Quantity"].str.contains("Avogadro") ]
c_V = float( Decimal(1.5)*(N_Avog.Value)*(k_Boltz.Value))/M_0
c_P = float( Decimal(2.5)*(N_Avog.Value)*(k_Boltz.Value))/M_0
c_V.subs(M_0, 39.948/1000.) # 312.198102337360
c_V.subs(M_0, 131.293/1000.) # 94.9912774647001
c_P.subs(M_0, 39.948/1000.) # 520.330170562267
c_P.subs(M_0, 131.293/1000.) # 158.318795774500
tau = Symbol("tau",real=True)
tau_0 = Symbol("tau_0",real=True)
GAMMA = Symbol("GAMMA",positive=True)
MachNo = Symbol("MachNo",positive=True)
TratiovsMachNo = Eq( tau_0/tau, Rat(1) + (GAMMA - Rat(1))/Rat(2)*MachNo**2 ) # stagnation-to-static temperature ratio for isentropic flow
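# Hedged worked example (assuming the standard isentropic relation written
# above with MachNo**2): for GAMMA = 1.4 and MachNo = 2 the stagnation-to-
# static temperature ratio is 1 + 0.2*4 = 1.8, i.e.
# TratiovsMachNo.subs({GAMMA: Rat(7,5), MachNo: 2}) yields Eq(tau_0/tau, Rat(9,5)).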
| gpl-2.0 |
icdishb/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
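# Note on the returned structure (descriptive only): each key is a label such
# as 'time(LARS) / time(OMP)\n (w/ Gram)' and each value is a 2-D array of
# timing ratios with shape (len(features_range), len(samples_range)); entry
# [i_f, i_s] compares the two solvers for features_range[i_f] features and
# samples_range[i_s] samples.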
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
ax = fig.add_subplot(1, 2, i + 1)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
perimosocordiae/scipy | scipy/spatial/_geometric_slerp.py | 20 | 7668 | from __future__ import division, print_function, absolute_import
__all__ = ['geometric_slerp']
import warnings
import numpy as np
from scipy.spatial.distance import euclidean
def _geometric_slerp(start, end, t):
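# Private helper: `start` and `end` are assumed to be unit-norm 1-D arrays
# and `t` a 1-D array of interpolation parameters in [0, 1]; the public
# wrapper `geometric_slerp` below validates these assumptions before
# delegating here.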
# create an orthogonal basis using QR decomposition
basis = np.vstack([start, end])
Q, R = np.linalg.qr(basis.T)
signs = 2 * (np.diag(R) >= 0) - 1
Q = Q.T * signs.T[:, np.newaxis]
R = R.T * signs.T[:, np.newaxis]
# calculate the angle between `start` and `end`
c = np.dot(start, end)
s = np.linalg.det(R)
omega = np.arctan2(s, c)
# interpolate
start, end = Q
s = np.sin(t * omega)
c = np.cos(t * omega)
return start * c[:, np.newaxis] + end * s[:, np.newaxis]
def geometric_slerp(start,
end,
t,
tol=1e-7):
"""
Geometric spherical linear interpolation.
The interpolation occurs along a unit-radius
great circle arc in arbitrary dimensional space.
Parameters
----------
start : (n_dimensions, ) array-like
Single n-dimensional input coordinate in a 1-D array-like
object. `n` must be greater than 1.
end : (n_dimensions, ) array-like
Single n-dimensional input coordinate in a 1-D array-like
object. `n` must be greater than 1.
t: float or (n_points,) array-like
A float or array-like of doubles representing interpolation
parameters, with values required in the inclusive interval
between 0 and 1. A common approach is to generate the array
with ``np.linspace(0, 1, n_pts)`` for linearly spaced points.
Ascending, descending, and scrambled orders are permitted.
tol: float
The absolute tolerance for determining if the start and end
coordinates are antipodes.
Returns
-------
result : (t.size, D)
An array of doubles containing the interpolated
spherical path and including start and
end when 0 and 1 t are used. The
interpolated values should correspond to the
same sort order provided in the t array. The result
may be 1-dimensional if ``t`` is a float.
Raises
------
ValueError
If ``start`` and ``end`` are antipodes, not on the
unit n-sphere, or for a variety of degenerate conditions.
Notes
-----
The implementation is based on the mathematical formula provided in [1]_,
and the first known presentation of this algorithm, derived from study of
4-D geometry, is credited to Glenn Davis in a footnote of the original
quaternion Slerp publication by Ken Shoemake [2]_.
.. versionadded:: 1.5.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Slerp#Geometric_Slerp
.. [2] Ken Shoemake (1985) Animating rotation with quaternion curves.
ACM SIGGRAPH Computer Graphics, 19(3): 245-254.
See Also
--------
scipy.spatial.transform.Slerp : 3-D Slerp that works with quaternions
Examples
--------
Interpolate four linearly-spaced values on the circumference of
a circle spanning 90 degrees:
>>> from scipy.spatial import geometric_slerp
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> start = np.array([1, 0])
>>> end = np.array([0, 1])
>>> t_vals = np.linspace(0, 1, 4)
>>> result = geometric_slerp(start,
... end,
... t_vals)
The interpolated results should be at 30 degree intervals
recognizable on the unit circle:
>>> ax.scatter(result[...,0], result[...,1], c='k')
>>> circle = plt.Circle((0, 0), 1, color='grey')
>>> ax.add_artist(circle)
>>> ax.set_aspect('equal')
>>> plt.show()
Attempting to interpolate between antipodes on a circle is
ambiguous because there are two possible paths, and on a
sphere there are infinite possible paths on the geodesic surface.
Nonetheless, one of the ambiguous paths is returned along
with a warning:
>>> opposite_pole = np.array([-1, 0])
>>> with np.testing.suppress_warnings() as sup:
... sup.filter(UserWarning)
... geometric_slerp(start,
... opposite_pole,
... t_vals)
array([[ 1.00000000e+00, 0.00000000e+00],
[ 5.00000000e-01, 8.66025404e-01],
[-5.00000000e-01, 8.66025404e-01],
[-1.00000000e+00, 1.22464680e-16]])
Extend the original example to a sphere and plot interpolation
points in 3D:
>>> from mpl_toolkits.mplot3d import proj3d
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
Plot the unit sphere for reference (optional):
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
Interpolating over a larger number of points
may provide the appearance of a smooth curve on
the surface of the sphere, which is also useful
for discretized integration calculations on a
sphere surface:
>>> start = np.array([1, 0, 0])
>>> end = np.array([0, 0, 1])
>>> t_vals = np.linspace(0, 1, 200)
>>> result = geometric_slerp(start,
... end,
... t_vals)
>>> ax.plot(result[...,0],
... result[...,1],
... result[...,2],
... c='k')
>>> plt.show()
"""
start = np.asarray(start, dtype=np.float64)
end = np.asarray(end, dtype=np.float64)
if start.ndim != 1 or end.ndim != 1:
raise ValueError("Start and end coordinates "
"must be one-dimensional")
if start.size != end.size:
raise ValueError("The dimensions of start and "
"end must match (have same size)")
if start.size < 2 or end.size < 2:
raise ValueError("The start and end coordinates must "
"both be in at least two-dimensional "
"space")
if np.array_equal(start, end):
return [start] * np.asarray(t).size
# for points that violate equation for n-sphere
for coord in [start, end]:
if not np.allclose(np.linalg.norm(coord), 1.0,
rtol=1e-9,
atol=0):
raise ValueError("start and end are not"
" on a unit n-sphere")
if not isinstance(tol, float):
raise ValueError("tol must be a float")
else:
tol = np.fabs(tol)
coord_dist = euclidean(start, end)
# diameter of 2 within tolerance means antipodes, which is a problem
# for all unit n-spheres (even the 0-sphere would have an ambiguous path)
if np.allclose(coord_dist, 2.0, rtol=0, atol=tol):
warnings.warn("start and end are antipodes"
" using the specified tolerance;"
" this may cause ambiguous slerp paths")
t = np.asarray(t, dtype=np.float64)
if t.size == 0:
return np.empty((0, start.size))
if t.min() < 0 or t.max() > 1:
raise ValueError("interpolation parameter must be in [0, 1]")
if t.ndim == 0:
return _geometric_slerp(start,
end,
np.atleast_1d(t)).ravel()
else:
return _geometric_slerp(start,
end,
t)
| bsd-3-clause |
d-grossman/pelops | pelops/etl/computeMatrixCMC.py | 3 | 2454 | import json
import time
from collections import defaultdict
from matplotlib import pyplot
def makeTransDicts(reindexFile):
reindex = open(reindexFile, 'r')
file2num = dict()
num2file = dict()
index = 0
for line in reindex:
line = line.strip()
file2num[line] = index
num2file[index] = line
index += 1
return (file2num, num2file)
def makeMatrix(matrixFilename, num2file, file2num, measure='cosine'):
a = open(matrixFilename, 'r')
lines = 0
for line in a:
lines += 1
a.close()
Matrix = [[0 for x in range(lines)] for y in range(lines)]
matrixFile = open(matrixFilename, 'r')
for line in matrixFile:
line = line.strip()
line = json.loads(line)
x = file2num[line['x']]
y = file2num[line['y']]
Matrix[x][y] = line[measure]
Matrix[y][x] = line[measure]
for index in range(0, lines):
Matrix[index][index] = 8675309
return Matrix
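# Each line of matrixFilename is expected to be a JSON object holding the two
# image names and their pairwise distances, e.g. (illustrative values only):
# {"x": "0001_c001_00016450_0.jpg", "y": "0002_c002_00016470_0.jpg", "cosine": 0.42}
# The names must match the entries of the reindex file so that file2num can
# map them onto matrix indices.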
def getrank(car, s, maxval=-1):
for sidx, work in enumerate(s):
# sval = work[0]
scar = work[1]
if scar == car:
return sidx
return maxval
def preCMC(Matrix, num2file, downto=50):
retval = defaultdict(int)
start = time.time()
size = len(Matrix[0])
for oindex in range(size):
if oindex % 1000 == 0:
print('index:{0} time:{1}'.format(oindex, time.time() - start))
start = time.time()
car = num2file[oindex].split('_')[0]
current = list()
for idx, val in enumerate(Matrix[oindex]):
current.append((float(val), num2file[idx].split('_')[0]))
s = sorted(current, key=lambda tup: tup[0])[:downto]
maxSearch = downto + 1
r = getrank(car, s, maxval=maxSearch)
retval[r] += 1
return retval
def computeCMC(rawCounts, num):
idx = sorted(rawCounts)
sum = 0
CMC = list()
for index in range(0, len(idx)):
sum += rawCounts[index]
print (index, sum)
CMC.append(sum / float(num))
return CMC
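# Hedged worked example: if 100 probe images yield rank counts
# {0: 80, 1: 10, 2: 5, ...}, then CMC[0] = 0.80, CMC[1] = 0.90 and
# CMC[2] = 0.95; in general CMC[k] is the fraction of probes whose correct
# vehicle id appears within the top k+1 ranked matches.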
testFilesName = '/local_data/dgrossman/VeRi/test_uniqfiles'
matrixFilename = '/local_data/dgrossman/VeRi/matrixFile.test_uniqfile'
file2num, num2file = makeTransDicts(testFilesName)
Matrix = makeMatrix(matrixFilename, num2file, file2num)
rawCounts = preCMC(Matrix, num2file)
CMC = computeCMC(rawCounts, len(Matrix[0]))
# pyplot.ylim(0,1)
pyplot.plot(CMC[:-1])
pyplot.show()
| apache-2.0 |
mdelorme/MNn | docs/conf.py | 1 | 8722 | # -*- coding: utf-8 -*-
#
# MNn documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 11 10:13:20 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import mock
MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'matplotlib.pyplot', 'scipy.optimize', 'matplotlib.ticker', 'corner', 'emcee']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.doctest'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MNn'
copyright = u'2016, Armando Rojas-Niño, Justin I. Read, Luis Aguilar, Maxime Delorme'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
napoleon_use_param = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'bizstyle'
html_theme = 'sphinx_rtd_theme'
html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MNndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'MNn.tex', u'MNn Documentation',
u'Armando Rojas-Niño, Justin I. Read, Luis Aguilar, Maxime Delorme', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mnn', u'MNn Documentation',
[u'Armando Rojas-Niño, Justin I. Read, Luis Aguilar, Maxime Delorme'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MNn', u'MNn Documentation',
u'Armando Rojas-Niño, Justin I. Read, Luis Aguilar, Maxime Delorme', 'MNn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/tests/test_basic.py | 7 | 1290 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from nose.tools import assert_equal
from matplotlib.testing.decorators import knownfailureif
from pylab import *
def test_simple():
assert_equal(1 + 1, 2)
@knownfailureif(True)
def test_simple_knownfail():
# Test the known fail mechanism.
assert_equal(1 + 1, 3)
def test_override_builtins():
ok_to_override = set([
'__name__',
'__doc__',
'__package__',
'__loader__',
'__spec__',
'any',
'all',
'sum'
])
# We could use six.moves.builtins here, but that seems
# to do a little more than just this.
if six.PY3:
builtins = sys.modules['builtins']
else:
builtins = sys.modules['__builtin__']
overridden = False
for key in globals().keys():
if key in dir(builtins):
if (globals()[key] != getattr(builtins, key) and
key not in ok_to_override):
print("'%s' was overridden in globals()." % key)
overridden = True
assert not overridden
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
Winand/pandas | pandas/tests/series/test_apply.py | 4 | 20591 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from collections import Counter, defaultdict, OrderedDict
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isna)
from pandas.compat import lrange
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
tm.assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# element-wise apply
import math
tm.assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
assert s is not rs
assert s.index is rs.index
assert s.dtype == rs.dtype
assert s.name == rs.name
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
assert result.dtype == object
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.ts.apply(arg)
expected = getattr(self.ts, arg)()
assert result == expected
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',', ))
assert result[0] == ['foo', 'bar']
assert isinstance(result[0], list)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns]'
# boxed value must be Timestamp instance
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
assert s.dtype == 'timedelta64[ns]'
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
assert s.dtype == 'object'
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
def test_apply_dict_depr(self):
tsdf = pd.DataFrame(np.random.randn(10, 3),
columns=['A', 'B', 'C'],
index=pd.date_range('1/1/2000', periods=10))
with tm.assert_produces_warning(FutureWarning):
tsdf.A.agg({'foo': ['sum', 'mean']})
class TestSeriesAggregate(TestData):
_multiprocess_can_split_ = True
def test_transform(self):
# transforming functions
with np.errstate(all='ignore'):
f_sqrt = np.sqrt(self.series)
f_abs = np.abs(self.series)
# ufunc
result = self.series.transform(np.sqrt)
expected = f_sqrt.copy()
assert_series_equal(result, expected)
result = self.series.apply(np.sqrt)
assert_series_equal(result, expected)
# list-like
result = self.series.transform([np.sqrt])
expected = f_sqrt.to_frame().copy()
expected.columns = ['sqrt']
assert_frame_equal(result, expected)
result = self.series.transform([np.sqrt])
assert_frame_equal(result, expected)
result = self.series.transform(['sqrt'])
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both functions per
# series and then concatting
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ['sqrt', 'absolute']
result = self.series.apply([np.sqrt, np.abs])
assert_frame_equal(result, expected)
result = self.series.transform(['sqrt', 'abs'])
expected.columns = ['sqrt', 'abs']
assert_frame_equal(result, expected)
# dict, provide renaming
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ['foo', 'bar']
expected = expected.unstack().rename('series')
result = self.series.apply({'foo': np.sqrt, 'bar': np.abs})
assert_series_equal(result.reindex_like(expected), expected)
def test_transform_and_agg_error(self):
# we are trying to transform with an aggregator
def f():
self.series.transform(['min', 'max'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.series.agg(['sqrt', 'max'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.series.transform(['sqrt', 'max'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.series.agg({'foo': np.sqrt, 'bar': 'sum'})
pytest.raises(ValueError, f)
def test_demo(self):
# demonstration tests
s = Series(range(6), dtype='int64', name='series')
result = s.agg(['min', 'max'])
expected = Series([0, 5], index=['min', 'max'], name='series')
tm.assert_series_equal(result, expected)
result = s.agg({'foo': 'min'})
expected = Series([0], index=['foo'], name='series')
tm.assert_series_equal(result, expected)
# nested renaming
with tm.assert_produces_warning(FutureWarning):
result = s.agg({'foo': ['min', 'max']})
expected = DataFrame(
{'foo': [0, 5]},
index=['min', 'max']).unstack().rename('series')
tm.assert_series_equal(result, expected)
def test_multiple_aggregators_with_dict_api(self):
s = Series(range(6), dtype='int64', name='series')
# nested renaming
with tm.assert_produces_warning(FutureWarning):
result = s.agg({'foo': ['min', 'max'], 'bar': ['sum', 'mean']})
expected = DataFrame(
{'foo': [5.0, np.nan, 0.0, np.nan],
'bar': [np.nan, 2.5, np.nan, 15.0]},
columns=['foo', 'bar'],
index=['max', 'mean',
'min', 'sum']).unstack().rename('series')
tm.assert_series_equal(result.reindex_like(expected), expected)
def test_agg_apply_evaluate_lambdas_the_same(self):
# test that we are evaluating row-by-row first
# before vectorized evaluation
result = self.series.apply(lambda x: str(x))
expected = self.series.agg(lambda x: str(x))
tm.assert_series_equal(result, expected)
result = self.series.apply(str)
expected = self.series.agg(str)
tm.assert_series_equal(result, expected)
def test_with_nested_series(self):
# GH 2316
# .agg with a reducer and a transform, what to do
result = self.ts.apply(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
tm.assert_frame_equal(result, expected)
result = self.ts.agg(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
tm.assert_frame_equal(result, expected)
def test_replicate_describe(self):
# this also tests a result set that is all scalars
expected = self.series.describe()
result = self.series.apply(OrderedDict(
[('count', 'count'),
('mean', 'mean'),
('std', 'std'),
('min', 'min'),
('25%', lambda x: x.quantile(0.25)),
('50%', 'median'),
('75%', lambda x: x.quantile(0.75)),
('max', 'max')]))
assert_series_equal(result, expected)
def test_reduce(self):
# reductions with named functions
result = self.series.agg(['sum', 'mean'])
expected = Series([self.series.sum(),
self.series.mean()],
['sum', 'mean'],
name=self.series.name)
assert_series_equal(result, expected)
def test_non_callable_aggregates(self):
# test agg using non-callable series attributes
s = Series([1, 2, None])
# Calling agg w/ just a string arg same as calling s.arg
result = s.agg('size')
expected = s.size
assert result == expected
# test when mixed w/ callable reducers
result = s.agg(['size', 'count', 'mean'])
expected = Series(OrderedDict([('size', 3.0),
('count', 2.0),
('mean', 1.5)]))
assert_series_equal(result[expected.index], expected)
class TestSeriesMap(TestData):
def test_map(self):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
merged = target.map(source)
for k, v in compat.iteritems(merged):
assert v == source[target[k]]
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
assert v == source[target[k]]
# function
result = self.ts.map(lambda x: x * 2)
tm.assert_series_equal(result, self.ts * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
tm.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series([1, 2, 3, 4],
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series(['B', 'C', 'D', 'E'], dtype='category',
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
exp = Series(pd.Categorical([np.nan, 'B', 'C', 'D'],
categories=['B', 'C', 'D', 'E']))
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 'B', 'C', 'D'])
tm.assert_series_equal(a.map(c), exp)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: 'foo', False: 'bar'})
expected = Series(['foo', 'foo', 'bar'], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
assert left.dtype == np.float_
assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
assert merged.dtype == np.float_
assert isna(merged['d'])
assert not isna(merged['c'])
def test_map_type_inference(self):
s = Series(lrange(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(self):
from decimal import Decimal
result = self.series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action='ignore')
exp = s * 2
assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
"""
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
df = pd.DataFrame({'a': [(1, ), (2, ), (3, 4), (5, 6)]})
label_mappings = {(1, ): 'A', (2, ): 'B', (3, 4): 'A', (5, 6): 'B'}
df['labels'] = df['a'].map(label_mappings)
df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df['labels'], df['expected_labels'],
check_names=False)
def test_map_counter(self):
s = Series(['a', 'b', 'c'], index=[1, 2, 3])
counter = Counter()
counter['b'] = 5
counter['c'] += 1
result = s.map(counter)
expected = Series([0, 5, 1], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_defaultdict(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
default_dict = defaultdict(lambda: 'blank')
default_dict[1] = 'stuff'
result = s.map(default_dict)
expected = Series(['stuff', 'blank', 'blank'], index=['a', 'b', 'c'])
assert_series_equal(result, expected)
def test_map_dict_subclass_with_missing(self):
"""
Test Series.map with a dictionary subclass that defines __missing__,
i.e. sets a default value (GH #15999).
"""
class DictWithMissing(dict):
def __missing__(self, key):
return 'missing'
s = Series([1, 2, 3])
dictionary = DictWithMissing({3: 'three'})
result = s.map(dictionary)
expected = Series(['missing', 'missing', 'three'])
assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing(self):
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: 'three'})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, 'three'])
assert_series_equal(result, expected)
def test_map_box(self):
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns]'
# boxed value must be Timestamp instance
res = s.map(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
res = s.map(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
assert s.dtype == 'timedelta64[ns]'
res = s.map(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
assert s.dtype == 'object'
res = s.map(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_map_categorical(self):
values = pd.Categorical(list('ABBABCD'), categories=list('DCBA'),
ordered=True)
s = pd.Series(values, name='XX', index=list('abcdefg'))
result = s.map(lambda x: x.lower())
exp_values = pd.Categorical(list('abbabcd'), categories=list('dcba'),
ordered=True)
exp = pd.Series(exp_values, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp_values)
result = s.map(lambda x: 'A')
exp = pd.Series(['A'] * 7, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action='ignore')
def test_map_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
# keep tz
result = s.map(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.map(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action='ignore')
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
| bsd-3-clause |
YubinXie/Computational-Pathology | Track.py | 1 | 6559 | ##Created at 08/02/2017 by Yubin Xie, MSKCC
##Modified at #### by ***
## This script finds the starting points in a binary image and the shortest path between them
import itertools
import math
from scipy import misc
from scipy.sparse.dok import dok_matrix
from scipy.sparse.csgraph import dijkstra
import operator
from operator import sub, add
import numpy as np
from matplotlib import pyplot as plt
OutputFolder=""
SampleID="path"
def main(SampleID):
print SampleID
# Load the image
original_img = misc.imread(SampleID+'.png')
img = original_img[:, :, 0] + original_img[:, :, 1] + original_img[:, :, 2]
# Defines a translation from 2 coordinates to a single number
def to_index(y, x):
return y * img.shape[1] + x
# Defines a reversed translation from index to 2 coordinates
def to_coordinates(index):
return index / img.shape[1], index % img.shape[1]
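# Illustrative example (not in the original script): for an image 50 pixels
# wide (img.shape[1] == 50), to_index(2, 3) == 2 * 50 + 3 == 103 and
# to_coordinates(103) == (2, 3), so the two helpers are inverses of each other.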
# Defines the distance between 2 coordinates
def distance(list1,list2):
return math.hypot(list1[0]-list2[0],list1[1]-list2[1])
def tupleadd(a,b):
return tuple(map(sum,zip(a,b)))
SourceList=[]
for i in range(0,img.shape[0]-1):
for j in range(0,img.shape[1]-1):
if img[i,j]>=1:
img[i,j]=1
for i in range(1,img.shape[0]-2):
for j in range(1,img.shape[1]-2):
if img[i,j]==0:
continue
sourse=None
NearValue=0
CornerValue=0
NearValueList=[]
CornerValueList=[]
NearPosition = [(-1,0),(0,-1),(0,1),(1,0),]
CornerPosition = [(-1,1),(1,1),(-1,-1),(1,-1)]
for direction in range(4):
NearValue=NearValue+img[tupleadd((i,j),NearPosition[direction])]
NearValueList.append(img[tupleadd((i,j),NearPosition[direction])])
CornerValue=CornerValue+img[tupleadd((i,j),CornerPosition[direction])]
CornerValueList.append(img[tupleadd((i,j),CornerPosition[direction])])
if NearValue>=2:
continue
if CornerValue>=2:
continue
#One pixel as start point
if NearValue==0:
if CornerValue==1:
sourse=(i,j)
print "1.1",sourse
#Two or more pixels as start point
elif NearValue==1:
if CornerValue==1:
NearDirection=NearPosition[NearValueList.index(1)]
CornerDirection=CornerPosition[CornerValueList.index(1)]
if (NearDirection[0]!=CornerDirection[0]) and (NearDirection[1]!=CornerDirection[1]):
continue
Direction=NearPosition[NearValueList.index(1)]
AgainstDirectionIndex=3-NearValueList.index(1)
NextPointValue=1
NextPoint=(i,j)
while NextPointValue:
NextPoint = tupleadd(NextPoint, Direction)
if NextPoint[0]>=img.shape[0] or NextPoint[1]>=img.shape[1]:
break
NextPointValue=img[NextPoint]
if NextPoint[0]>=img.shape[0] or NextPoint[1]>=img.shape[1]:
continue
EndPoint= tuple(map(sub, NextPoint, Direction))
EndNearValueList=[]
EndCornerValueList=[]
EndNearValue=0
EndCornerValue=0
for direction in range(4):
EndNearPoint=tupleadd(EndPoint,NearPosition[direction])
EndNearValue=EndNearValue+img[EndNearPoint]
EndNearValueList.append(img[EndNearPoint])
EndCornerPoint=tupleadd(EndPoint,CornerPosition[direction])
if EndCornerPoint[0]>=img.shape[0] or EndCornerPoint[1]>=img.shape[1]:
break
EndCornerValue=EndCornerValue+img[EndCornerPoint]
EndCornerValueList.append(img[EndCornerPoint])
if EndNearValue==1:
if EndCornerValue==1:
sourse=(i,j)
if EndNearValue==2:
if EndCornerValue==0:
sourse=(i,j)
if EndCornerValue==1:
EndNearValueList[AgainstDirectionIndex]=0
NearDirection=NearPosition[EndNearValueList.index(1)]
CornerDirection=CornerPosition[EndCornerValueList.index(1)]
if (NearDirection[0]==CornerDirection[0]) or (NearDirection[1]==CornerDirection[1]):
#print CornerDirection,NearDirection
#print EndNearValueList
sourse=(i,j)
if sourse!=None:
if sourse not in SourceList:
SourceList.append(sourse)
print (SourceList), "\n",len(SourceList), " single points are found in the image"
# Two pixels are adjacent in the graph if both are painted.
adjacency = dok_matrix((img.shape[0] * img.shape[1],img.shape[0] * img.shape[1]), dtype=bool)
directions = list(itertools.product([0, 1, -1], [0, 1, -1]))
for i in range(1, img.shape[0] - 1):
for j in range(1, img.shape[1] - 1):
if not img[i, j]:
continue
for y_diff, x_diff in directions:
if img[i + y_diff, j + x_diff]:
adjacency[to_index(i, j),
to_index(i + y_diff, j + x_diff)] = True
# Choose first two point
source = to_index(SourceList[0][0],SourceList[0][1])
target = to_index(SourceList[1][1],SourceList[1][1]) if False else to_index(SourceList[1][0],SourceList[1][1])  # use both coordinates of the second start point (the hard-coded 15 looked like leftover debugging)
# Compute the shortest path between the source and all other points in the image
_, predecessors = dijkstra(adjacency, directed=False, indices=[source],
unweighted=True, return_predecessors=True)
# Construct the path between source and target
pixel_index = target
pixels_path = []
while pixel_index != source:
pixels_path.append(pixel_index)
pixel_index = predecessors[0, pixel_index]
#To visualize the chosen path
for point in SourceList:
original_img[point[0],point[1],0]=0
Path=[]
for point in pixels_path:
point=to_coordinates(point)
Path.append(point)
original_img[point[0],point[1],1]=0
print Path
plt.imshow(original_img)
plt.show()
if __name__ == '__main__':
main(SampleID)
| gpl-2.0 |
MohammedWasim/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
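# Worked example (illustrative): with n_features=11, n_features_to_select=3
# and step=2, formula1 gives 1 + (11 + 2 - 3 - 1) // 2 = 1 + 4 = 5 and
# formula2 gives 1 + ceil((11 - 3) / 2.0) = 1 + 4 = 5, so the two agree.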
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
pkruskal/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
KevinFasusi/supplychainpy | supplychainpy/_helpers/_data_cleansing.py | 1 | 8880 | # Copyright (c) 2015-2016, The Authors and Contributors
# <see AUTHORS file>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from decimal import Decimal
from warnings import warn
import csv
import logging
from supplychainpy._helpers._decorators import log_this
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
TABLE_HEADINGS = {
'UNIT_COST': 'unit_cost',
'LEAD_TIME': 'lead_time',
'RETAIL_PRICE': 'retail_price',
'QUANTITY_ON_HAND': 'quantity_on_hand',
'BACKLOG': 'backlog'
}
# TODO-feature format pandas frame
# TODO-feature allow data munger to accept any delimiter as a parameter
def clean_orders_data_col_txt(file) -> dict:
"""
Args:
file: An open pipe-delimited text file with one "sku_id|order_quantity" pair per line.
Returns:
dict: A dictionary mapping each sku_id to its order quantity as a Decimal.
"""
item_list = {}
split_line = []
for line in file:
split_line.append(line.split("|"))
for item in split_line:
item_list[item[0]] = Decimal(item[1].strip())
return item_list
# TODO-feature make csv version of clean_orders also take into account column number add tests (create csv file)
def clean_orders_data_col_csv(file) -> dict:
item_list = {}
read_csv = csv.reader(file)
headers = next(read_csv)
split_line = list(read_csv)
for item in split_line:
item_list[item[0]] = Decimal(item[1])
return item_list
# TODO-feature allow data munger to accept any delimiter as a parameter
def clean_orders_data_row(file, length: int) -> list:
collection = []
try:
sku_list = []
split_line = []
composite = {}
# unpacks the line form the txt file and splits using delimiter (default pipe) to split_line list
for line in file:
split_line.append(line.split("|"))
# sorts all parts of the list into labeled dictionary
for row in split_line:
for index, item in enumerate(row):
if int(index) < 1:
composite["sku_id"] = item
elif 1 <= int(index) <= length:
sku_list.append(item.strip("\n"))
elif int(index) == length + 1:
composite["unit_cost"] = item.strip("\n")
elif int(index) == length + 2:
composite["lead_time"] = item.strip("\n")
# if the sku id is not unique then the data will just append. Need to check if there are duplicates
# and throw an exception.
composite["demand"] = sku_list
sku_list = []
collection.append(composite)
composite = {}
except OSError:
print("")
return collection
# TODO-feature cleans a text or csv file and insert into numpy array
# remember to specify in documentation that the orders data will assume 12 months unless otherwise stated
@log_this(logging.INFO, message='clean orders data.')
def clean_orders_data_row_csv(file, length: int = 12) -> list:
# Check the demand list length against the specified column count and make sure each row is long
# enough to also hold unit_cost, lead_time, retail_price, quantity_on_hand and backlog.
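# Illustrative row layout (values are made up): with the default length of 12,
# a valid data row has 12 + 6 columns, i.e.
# sku_id, 12 demand values, unit_cost, lead_time, retail_price, quantity_on_hand, backlog
# e.g. KR202-209,75,75,75,75,75,75,75,75,75,75,75,75,400,2,5000,1000,10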
collection = []
try:
sku_list = []
composite = {}
read_csv = csv.reader(file)
headers = next(read_csv)
split_line = list(read_csv)
for row in split_line:
if length == len(row) - 6:
for index, item in enumerate(row):
if int(index) < 1:
composite["sku_id"] = item
elif 1 <= index <= length:
sku_list.append(item)
elif index == length + 1:
composite["unit_cost"] = item
log.info("Extracted unit cost for sku. SKU: {} UNIT COST: {} ".format(
composite.get("sku_id", "UNKNOWN_SKU"), item))
elif index == length + 2:
composite["lead_time"] = item
log.info("Extracted unit cost for sku. SKU: {} LEAD-TIME: {} ".format(
composite.get("sku_id", "UNKNOWN_SKU"), item))
elif index == length + 3:
composite["retail_price"] = item
log.info("Extracted unit cost for sku. SKU: {} RETAIL PRICE: {} ".format(
composite.get("sku_id", "UNKNOWN_SKU"), item))
elif index == length + 4:
composite["quantity_on_hand"] = item
log.info("Extracted unit cost for sku. SKU: {} QUANTITY ON HAND: {} ".format(
composite.get("sku_id", "UNKNOWN_SKU"), item))
elif index == length + 5:
composite["backlog"] = item
log.info("Extracted unit cost for sku. SKU: {} BACKLOG: {} ".format(
composite.get("sku_id", "UNKNOWN_SKU"), item))
# if the sku id is not unique then the data will just append. Need to check if there are duplicates
# and throw an exception.
composite["demand"] = tuple(sku_list)
composite["headers"] = headers
sku_list = []
if composite.get("sku_id") is None or composite.get("unit_cost") is None or composite.get("lead_time") is \
None or composite.get("retail_price") is None:
err_msg = "csv file is formatted incorrectly. Please make sure the formatted file\n [sku_id," \
" orders1,orders2....unit_cost, lead_time, retail_price, quantity_on_hand"
raise Exception(err_msg)
collection.append(composite)
composite = {}
else:
formatting_err = "The file formatting is incorrect. The specified column count supplied as a" \
" parameter is {}.\n Including the sku_id, unit_cost, lead_time, retail_price\n " \
"and quantity on hand the csv row should be {} columns long.\n The current" \
" column count is {}. Please check the file or specified length."
raise Exception(formatting_err.format(length, (length + 1) + len(TABLE_HEADINGS), len(row)))
except OSError as e:
print(e)
return collection
def match_headers(headers: list)->dict:
pass
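# match_headers is currently a stub. One possible sketch (an assumption, not
# part of the library) would map raw header names onto the canonical names in
# TABLE_HEADINGS, e.g.:
# return {h: TABLE_HEADINGS.get(h.strip().upper(), h) for h in headers}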
def check_extension(file_path, file_type: str) -> bool:
""" Check the correct file type has been selected.
Args:
file_path (str): The path to the data file whose extension will be checked.
file_type (str): specifying 'csv' or 'text'
Returns:
bool: Extension present or not.
"""
try:
if file_path.endswith(".txt") and file_type.lower() == "text":
flag = True
elif file_path.endswith(".csv") and file_type.lower() == "csv":
flag = True
else:
print(file_path, file_type)
flag = False
return flag
except OSError as e:
print(file_path, file_type ,e)
# refactor length to column count
# if a user specifies a lower column_count than actually supplied then the other columns are going to be incorrect.
# probably best to check headings using regex
| bsd-3-clause |
RondaStrauch/landlab | landlab/ca/examples/sir/sir.py | 6 | 5291 | #!/usr/env/python
"""
sir.py:
Example of a Susceptible-Infectious-Recovered epidemiological
cellular automaton model implemented on a hexagonal grid using stochastic
pair-transition rules.
GT Sep 2014
"""
from __future__ import print_function
_DEBUG = False
import time
from landlab import HexModelGrid
from numpy import where, logical_and
from landlab.ca.celllab_cts import Transition, CAPlotter
from landlab.ca.hex_cts import HexCTS
import pylab
def setup_transition_list(infection_rate):
"""
Creates and returns a list of Transition() objects to represent state
transitions for the SIR model.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
The states and transitions are as follows:
Pair state Transition to Process
========== ============= =======
0 (0-0) (none) -
1 (0-1) 4 (1-1) infection
2 (0-2) recovery
2 (0-2) (none) -
3 (1-0) 4 (1-1) infection
6 (2-0) recovery
4 (1-1) 5 (1-2) recovery
6 (2-1) recovery
5 (1-2) 8 (2-2) recovery
6 (2-0) (none) -
7 (2-1) 8 (2-2) recovery
8 (2-2) (none) -
"""
xn_list = []
xn_list.append( Transition((0,1,0), (1,1,0), infection_rate, 'infection') )
xn_list.append( Transition((0,1,0), (0,2,0), 1., 'recovery') )
xn_list.append( Transition((1,0,0), (1,1,0), infection_rate, 'infection') )
xn_list.append( Transition((1,0,0), (2,0,0), 1., 'recovery') )
xn_list.append( Transition((1,1,0), (1,2,0), 1., 'recovery') )
xn_list.append( Transition((1,1,0), (2,1,0), 1., 'recovery') )
xn_list.append( Transition((1,2,0), (2,2,0), 1., 'recovery') )
xn_list.append( Transition((2,1,0), (2,2,0), 1., 'recovery') )
if _DEBUG:
print()
print('setup_transition_list(): list has',len(xn_list),'transitions:')
for t in xn_list:
print(' From state',t.from_state,'to state',t.to_state,'at rate',t.rate,'called',t.name)
return xn_list
def main():
# INITIALIZE
# User-defined parameters
nr = 80
nc = 41
plot_interval = 0.25
run_duration = 5.0
report_interval = 5.0 # report interval, in real-time seconds
infection_rate = 8.0
outfilename = 'sirmodel'+str(int(infection_rate))+'ir'
# Remember the clock time, and calculate when we next want to report
# progress.
current_real_time = time.time()
next_report = current_real_time + report_interval
time_slice = 0
# Create a grid
hmg = HexModelGrid(nr, nc, 1.0)
# Set up the states and pair transitions.
# Transition data here represent the disease status of a population.
ns_dict = { 0 : 'susceptible', 1 : 'infectious', 2: 'recovered' }
xn_list = setup_transition_list(infection_rate)
# Create data and initialize values
node_state_grid = hmg.add_zeros('node', 'node_state_grid')
wid = nc-1.0
ht = (nr-1.0)*0.866
is_middle_rows = logical_and(hmg.node_y>=0.4*ht, hmg.node_y<=0.5*ht)
is_middle_cols = logical_and(hmg.node_x>=0.4*wid, hmg.node_x<=0.6*wid)
middle_area = where(logical_and(is_middle_rows, is_middle_cols))[0]
node_state_grid[middle_area] = 1
node_state_grid[0] = 2 # to force full color range, set lower left to 'recovered'
# Create the CA model
ca = HexCTS(hmg, ns_dict, xn_list, node_state_grid)
# Set up the color map
import matplotlib
susceptible_color = (0.5, 0.5, 0.5) # gray
infectious_color = (0.5, 0.0, 0.0) # dark red
recovered_color = (0.0, 0.0, 1.0) # blue
clist = [susceptible_color, infectious_color, recovered_color]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
ca_plotter = CAPlotter(ca, cmap=my_cmap)
# Plot the initial grid
ca_plotter.update_plot()
pylab.axis('off')
savename = outfilename+'0'
pylab.savefig(savename+'.pdf', format='pdf')
# RUN
current_time = 0.0
while current_time < run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= next_report:
print('Current sim time',current_time,'(',100*current_time/run_duration,'%)')
next_report = current_real_time + report_interval
# Run the model forward in time until the next output step
ca.run(current_time+plot_interval, ca.node_state,
plot_each_transition=False) #True, plotter=ca_plotter)
current_time += plot_interval
# Plot the current grid
ca_plotter.update_plot()
pylab.axis('off')
time_slice += 1
savename = outfilename+str(time_slice)
pylab.savefig(savename+'.pdf', format='pdf')
# FINALIZE
# Plot
ca_plotter.finalize()
if __name__=='__main__':
main()
| mit |