repo_name | path | copies | size | content | license
---|---|---|---|---|---|
peri-source/peri | scripts/figures/generative-model.py | 1 | 1123 | import numpy as np
import scipy as sp
from peri import runner, util
from peri.viz.plots import generative_model
import pickle
import matplotlib.pyplot as pl
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib.patches import Circle, Rectangle
def sample_center_particle(state):
    # Sample the in-plane position and radius of the particle closest to the
    # image center; z is read from the current state.
    cind = state.closest_particle(np.array(state.image.shape)/2)
    blocks = state.blocks_particle(cind)
    hxy = runner.sample_state(state, blocks[1:3], N=5000, doprint=True)
    hr = runner.sample_state(state, [blocks[-1]], N=5000, doprint=True)
    z = state.state[blocks[0]]
    y, x = hxy.get_histogram().T
    r = hr.get_histogram()
    return x, y, z, r
def load():
    s, h, l = pickle.load(open('/media/scratch/bamf/crystal-fcc/crystal_fcc.tif_t001.tif-fit-gaussian-4d.pkl', 'rb'))
    x, y, z, r = np.load('/media/scratch/bamf/crystal-fcc/crystal_fcc.tif_t001.tif-fit-gaussian-4d-sample-xyzr.npy').T
    x -= s.pad
    y -= s.pad
    return s, x, y, z, r
def dorun():
    generative_model(*load())
| mit |
OpenDrift/opendrift | examples/example_convolve_input.py | 1 | 2301 | #!/usr/bin/env python
"""
Convolve input
==============
Decreasing the spatial resolution of fields from a reader by convolution.
This may improve accuracy, see: https://doi.org/10.1016/j.rse.2019.01.001
"""
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.models.oceandrift import OceanDrift
lon = 4.9; lat = 60.0
o = OceanDrift(loglevel=20)
reader_norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() + '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
time = reader_norkyst.start_time
o.add_reader([reader_norkyst])
o.seed_elements(lon, lat, radius=1000, number=1000, time=time)
o.run(steps=20)
#%%
# Store final field of x-component of current
original_current = reader_norkyst.var_block_after[list(reader_norkyst.var_block_after.keys())[0]].data_dict['x_sea_water_velocity'].copy()
#%%
# For the second run, the NorKyst currents are convolved with a kernel,
# effectively lowering the spatial resolution.
# The kernel may also be given directly to <reader>.set_convolution_kernel as an array
N = 10 # Convolution kernel size
reader_norkyst.set_convolution_kernel(N) # Using convolution kernel for second run
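# A minimal sketch (commented out; assumes set_convolution_kernel accepts a
# 2-D array, as stated above) of supplying a custom kernel instead of a size:
#   import numpy as np
#   kernel = np.ones((N, N)) / N**2  # normalized uniform (boxcar) kernel
#   reader_norkyst.set_convolution_kernel(kernel)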
o2 = OceanDrift(loglevel=20)
o2.add_reader([reader_norkyst])
o2.seed_elements(lon, lat, radius=1000, number=1000, time=time)
o2.run(steps=20)
#%%
# Store final field of x-component of (convolved) current
convolved_current = reader_norkyst.var_block_after[
"['x_sea_water_velocity', 'y_sea_water_velocity']"].data_dict['x_sea_water_velocity']
plt.subplot(2,1,1)
plt.imshow(original_current, interpolation='nearest')
plt.title('Original current field (x-component)')
clim = plt.gci().get_clim()
plt.colorbar()
plt.subplot(2,1,2)
plt.imshow(convolved_current, interpolation='nearest')
plt.clim(clim) # Make sure plots are comparable
plt.colorbar()
plt.title('Final, convolved current field (x-component)')
plt.show()
#%%
# Print and plot results, with convolved currents as background
print(o)
o.animation(compare=o2, fast=True, legend=[
    'Original currents', 'Current convolved with kernel of size %s' % N],
background=['x_sea_water_velocity', 'y_sea_water_velocity'])
#%%
# .. image:: /gallery/animations/example_convolve_input_0.gif
o.plot(fast=True)
| gpl-2.0 |
mihaic/brainiak | brainiak/reprsimil/brsa.py | 4 | 211992 | # Copyright 2016 Mingbo Cai, Princeton Neuroscience Institute,
# Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Bayesian Representational Similarity Analysis (BRSA)
This implementation is based on [Cai2016]_ and [Cai2019]_:
.. [Cai2016] "A Bayesian method for reducing bias in neural
representational similarity analysis",
M.B. Cai, N.W. Schuck, J.W. Pillow, Y. Niv,
Advances in Neural Information Processing Systems 29, 2016, 4952--4960
Available at:
http://papers.nips.cc/paper/6131-a-bayesian-method-for-reducing-bias-in-neural-representational-similarity-analysis.pdf
.. [Cai2019] "Representational structure or task structure?
Bias in neural representational similarity analysis and
a Bayesian method for reducing bias",
M.B. Cai, N.W. Schuck, J.W. Pillow, Y. Niv,
PLoS computational biology 15.5 (2019): e1006299.
https://doi.org/10.1371/journal.pcbi.1006299
`.BRSA` is based on [Cai2016] with additional consideration
of spatial noise correlation proposed in [Cai2019].
`.GBRSA` is based on [Cai2019].
`.GBRSA` may perform better than `.BRSA` due to marginalization of all
voxel-wise parameters. It can be used for a single participant as well.
"""
# Authors: Mingbo Cai
# Princeton Neuroscience Institute, Princeton University, 2016
import numpy as np
import scipy
import scipy.optimize
import scipy.stats
import scipy.special
import time
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import assert_all_finite, check_random_state
from sklearn.decomposition import PCA, FactorAnalysis, SparsePCA, FastICA
import logging
import brainiak.utils.utils as utils
import scipy.spatial.distance as spdist
from nitime import algorithms as alg
import copy
logger = logging.getLogger(__name__)
__all__ = [
"BRSA",
"GBRSA",
"Ncomp_SVHT_MG_DLD_approx",
"prior_GP_var_inv_gamma",
"prior_GP_var_half_cauchy",
]
def prior_GP_var_inv_gamma(y_invK_y, n_y, tau_range):
""" Imposing an inverse-Gamma prior onto the variance (tau^2)
parameter of a Gaussian Process, which is in turn a prior
imposed over an unknown function y = f(x).
The inverse-Gamma prior of tau^2, tau^2 ~ invgamma(shape, scale)
is described by a shape parameter alpha=2 and a scale parameter
beta=tau_range^2. tau_range describes the reasonable range of
tau in the inverse-Gamma prior.
The data y's at locations x's are assumed to follow Gaussian Process:
f(x, x') ~ N(0, K(x, x') / 2 tau^2), where K is a kernel
function defined on x. For n observations, K(x1, x2, ..., xn) is
an n by n positive definite matrix.
Given the prior parameter tau_range, number of observations
n_y, and y_invK_y = y * inv(K) * y',
the function returns the MAP estimate of tau^2 and
the log posterior probability of tau^2 at the MAP value:
log(p(tau^2|tau_range)).
This function is written primarily for BRSA but can also
be used elsewhere. y in this case corresponds to the log of
SNR in each voxel. GBRSA does not rely on this function.
An alternative form of prior is half-Cauchy prior on tau.
Inverse-Gamma prior penalizes for both very small and very
large values of tau, while half-Cauchy prior only penalizes
for very large values of tau.
For more information on usage, see description in BRSA class:
`.BRSA`
See also: `.prior_GP_var_half_cauchy`
Parameters
----------
y_invK_y: float
y * inv(K) * y^T, where y=f(x) is a vector of observations
of unknown function f at different locations x.
K is correlation matrix of f between different locations, based
on a Gaussian Process (GP) describing the smoothness property
of f. K fully incorporates the form of the kernel
and the length scale of the GP, but not the variance of the GP
(the purpose of this function is to estimate the variance).
n_y: int, number of observations
tau_range: float,
The reasonable range of tau, the standard deviation of the
Gaussian Process imposed on y=f(x). tau_range is parameter
of the inverse-Gamma prior. Say, if you expect the standard
deviation of the Gaussian process to be around 3, tau_range
can be set to 3.
The smaller it is, the more penalization is imposed
on large variation of y.
Returns
-------
tau2: The MAP estimation of tau^2 based on the prior on tau
and y_invK_y.
log_ptau: log(p(tau)) of the returned tau^2 based on the
inverse-Gamma prior.
"""
alpha = 2
tau2 = (y_invK_y + 2 * tau_range**2) / (alpha * 2 + 2 + n_y)
log_ptau = scipy.stats.invgamma.logpdf(
tau2, scale=tau_range**2, a=2)
return tau2, log_ptau
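# Usage sketch (hypothetical numbers): with 50 observations of log(SNR),
# y_invK_y already computed from the data, and an expected spread of about 3
# for log(SNR), one could call
#   tau2_map, log_ptau = prior_GP_var_inv_gamma(y_invK_y=120.0, n_y=50,
#                                               tau_range=3.0)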
def prior_GP_var_half_cauchy(y_invK_y, n_y, tau_range):
""" Imposing a half-Cauchy prior onto the standard deviation (tau)
of the Gaussian Process which is in turn a prior imposed over
a function y = f(x).
The scale parameter of the half-Cauchy prior is tau_range.
The function returns the MAP estimate of tau^2 and
log(p(tau|tau_range)) for the MAP value of tau^2,
where tau_range describes the reasonable range of tau
in the half-Cauchy prior.
An alternative form of prior is inverse-Gamma prior on tau^2.
Inverse-Gamma prior penalizes for both very small and very
large values of tau, while half-Cauchy prior only penalizes
for very large values of tau.
For more information on usage, see description in BRSA class:
`.BRSA`
"""
tau2 = (y_invK_y - n_y * tau_range**2
+ np.sqrt(n_y**2 * tau_range**4 + (2 * n_y + 8)
* tau_range**2 * y_invK_y + y_invK_y**2))\
/ 2 / (n_y + 2)
log_ptau = scipy.stats.halfcauchy.logpdf(
tau2**0.5, scale=tau_range)
return tau2, log_ptau
def Ncomp_SVHT_MG_DLD_approx(X, zscore=True):
""" This function implements the approximate calculation of the
optimal hard threshold for singular values, by Matan Gavish
and David L. Donoho:
"The optimal hard threshold for singular values is 4 / sqrt(3)"
http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=6846297
Parameters
----------
X: 2-D numpy array of size [n_T, n_V]
The data to estimate the optimal rank for selecting principal
components.
zscore: Boolean
Whether to z-score the data before calculating number of components.
Returns
-------
ncomp: integer
The optimal number of components determined by the method of MG
and DLD
"""
beta = X.shape[0] / X.shape[1]
if beta > 1:
beta = 1 / beta
omega = 0.56 * beta ** 3 - 0.95 * beta ** 2 + 1.82 * beta + 1.43
if zscore:
sing = np.linalg.svd(_zscore(X), False, False)
else:
sing = np.linalg.svd(X, False, False)
thresh = omega * np.median(sing)
ncomp = int(np.sum(np.logical_and(sing > thresh, np.logical_not(
np.isclose(sing, thresh)))))
# In the line above, we look for the singular values larger than
# the threshold but excluding those that happen to be "just" larger
# than the threshold by an amount close to the numerical precision.
# This is to prevent close-to-zero singular values to be included if
# the median of the eigenvalues is close to 0 (which could happen
# when the input X has lower rank than its minimal size).
return ncomp
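# Usage sketch: estimate how many principal components of the residual noise
# to keep, assuming `residuals` is a [n_T, n_V] array of model residuals:
#   n_comp = Ncomp_SVHT_MG_DLD_approx(residuals, zscore=True)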
def _zscore(a):
""" Calculating z-score of data on the first axis.
If the numbers in any column are all equal, scipy.stats.zscore
will return NaN for this column. We shall correct them all to
be zeros.
Parameters
----------
a: numpy array
Returns
-------
zscore: numpy array
The z-scores of input "a", with any columns including non-finite
numbers replaced by all zeros.
"""
assert a.ndim > 1, 'a must have more than one dimensions'
zscore = scipy.stats.zscore(a, axis=0)
zscore[:, np.logical_not(np.all(np.isfinite(zscore), axis=0))] = 0
return zscore
class BRSA(BaseEstimator, TransformerMixin):
"""Bayesian representational Similarity Analysis (BRSA)
Given the time series of neural imaging data in a region of interest
(ROI) and the hypothetical neural response (design matrix) to
each experimental condition of interest,
calculate the shared covariance matrix U of
the voxels' (recording units') response profiles \\beta_i to each condition,
and the relative SNR of each voxel.
The relative SNR could be considered as the degree of contribution
of each voxel to this shared covariance matrix.
A correlation matrix converted from the covariance matrix U
will be provided as a quantification of neural representational similarity.
.. math::
Y = X \\cdot \\beta + X_0 \\cdot \\beta_0 + \\epsilon
\\beta_i \\sim N(0,(s_{i} \\sigma_{i})^2 U)
\\epsilon_i \\sim AR(1)
Please note that the model assumes that the covariance matrix U which
all \\beta_i follow is zero-meaned. This assumption does not imply
there must be both positive and negative responses across voxels.
However, it means that Bayesian RSA treats the task-evoked activity
against baseline BOLD level as signal, while in other RSA tools
the deviation of task-evoked activity in each voxel from the average
task-evoked activity level across voxels may be considered as signal
of interest. Due to this assumption in BRSA, relatively high degree
of similarity may be expected when the activity patterns of two
task conditions both include strong sensory driven signals regardless
of their specific stimuli. When two task conditions elicit exactly
the same activity patterns but only differ in their global magnitudes,
under the assumption in BRSA, their similarity is 1; under the assumption
that only deviation of pattern from average patterns is signal of interest,
their similarity should be -1.
Parameters
----------
n_iter : int.
Number of maximum iterations to run the algorithm.
rank : int. Default: None
The rank of the covariance matrix.
If not provided, the covariance matrix will be assumed
to be full rank. When you have many conditions
(e.g., calculating the similarity matrix of responses to each event),
you might try specifying a lower rank.
auto_nuisance: boolean.
In order to model spatial correlation between voxels that cannot
be accounted for by common response captured in the design matrix,
we assume that a set of time courses not related to the task
conditions are shared across voxels with unknown amplitudes.
One approach is for users to provide time series which they consider
as nuisance but exist in the noise (such as head motion).
The other way is to take the first n_nureg principal components
in the residual after subtracting the response to the design matrix
from the data, and use these components as the nuisance regressor.
This flag is for the second approach. If turned on,
PCA or factor analysis will be applied to the residuals
to obtain new nuisance regressors in each round of fitting.
These two approaches can be combined. If the users provide nuisance
regressors and set this flag as True, then the first n_nureg
principal components of the residuals after subtracting
both the responses to design matrix and the user-supplied nuisance
regressors will be used in addition to the nuisance regressors
provided by the users.
Note that nuisance regressors are not required from the user. If they are
not provided, DC components for each run will be included as nuisance
regressors regardless of the auto_nuisance parameter.
n_nureg: Optional[int].
Number of nuisance regressors to use in order to model signals
shared across voxels not captured by the design matrix.
This number is in addition to any nuisance regressor that the user
has already provided.
If set to None, the number of nuisance regressors will be
automatically determined based on M Gavish
and D Donoho's approximate estimation of optimal hard
threshold for singular values.
This only takes effect if auto_nuisance is True.
nureg_zscore: boolean.
A flag to tell the algorithm whether data is z-scored before
estimating the number of nuisance regressor components necessary to
account for spatial noise correlation. It also determines whether
the residual noise is z-scored before estimating the nuisance
regressors from residual.
This only takes effect if auto_nuisance is True.
nureg_method: string, naming a method from sklearn.decomposition.
'PCA', 'ICA', 'FA' or 'SPCA' are currently supported.
The method to estimate the shared component in noise across voxels.
This only takes effect if auto_nuisance is True.
baseline_single: boolean.
A time course of constant 1 will be included in the nuisance
regressors regardless of whether the user requests it.
If baseline_single is set to False, one such regressor is included
for each fMRI run, but a single component in beta0\\_ will be
computed as the average of the weight maps corresponding to
these regressors. This might cause underestimation of noise variance.
If baseline_single is True, only one regressor of constant 1 will be
used for the whole dataset. This might be desirable if you
believe the average image intensity might not scale with the
same proportion for different voxels across scan. In other words,
it is possible that some part of the brain is more vulnerable to
change in baseline intensity due to facts such as
field inhomogeneity. Setting baseline_single to True will force the
nuisance regressors automatically estimated from residuals to
capture this. However, when each task condition only occurs in one
run and when the design matrix in each run sums together close to
a flat line, this option can cause the estimated similarity to be
extremely high between conditions occurring in the same run.
GP_space: boolean.
Whether to impose a Gaussian Process (GP) prior on the log(pseudo-SNR).
If true, the GP has a kernel defined over spatial coordinate
of each voxel. The idea behind this option is that
adjacent voxels should have similar SNRs.
This is relatively slow for big ROI. We find that when SNR
is generally low, smoothness can be overestimated.
But such regularization may reduce variance in the estimated
SNR map and similarity matrix.
GP_inten: boolean.
Whether to include a kernel defined over the intensity of image.
GP_space should be True as well if you want to use this,
because the smoothness should be primarily in space.
Smoothness in intensity is just complementary. The idea
behind this option is that voxels should have similar
SNRs when they are both adjacent (imposed by GP_space)
and are of the same tissue type (when their image intensities
are close). If you accept the second assumption, then
you can set GP_inten as True and provide an array to the `inten`
variable, expressing the intensities (brightness) for each voxel.
space_smooth_range: float.
The distance (in unit the same as what
you would use when supplying the spatial coordinates of
each voxel, typically millimeter) which you believe is
the maximum range of the length scale parameter of
Gaussian Process defined over voxel location. This is
used to impose a half-Cauchy prior on the length scale.
If set to None, the program will default to half of the
maximum distance between all voxels.
inten_smooth_range: float.
The difference in image intensity which
you believe is the maximum range of plausible length
scale for the Gaussian Process defined over image
intensity. Length scales larger than this are allowed,
but will be penalized. If set to None, this parameter
will default to half of the maximal intensity difference.
tau_range: float.
The reasonable range of the standard deviation
of log(SNR). This range should not be too
large. 5 is a loose range.
When a Gaussian Process is imposed on the log(SNR),
this parameter is used in a half-Cauchy prior
on the standard deviation, or an inverse-Gamma prior
on the variance of the GP.
tau2_prior: Callable[[float, int, float], [float, float]],
Default: prior_GP_var_inv_gamma.
Can be prior_GP_var_inv_gamma or prior_GP_var_half_cauchy,
or a custom function.
The function which imposes a prior on tau^2, the variance of the
GP prior on log(SNR), and returns the MAP estimate of tau^2.
It can be either prior_GP_var_inv_gamma for inverse-Gamma
or prior_GP_var_half_cauchy for half-Cauchy.
half-Cauchy prior is in fact imposed on tau.
But tau_range describes the range of tau in the prior in both cases.
Both functions are part of brsa module.
See also `.prior_GP_var_inv_gamma` and
`.prior_GP_var_half_cauchy`
To use the default inverse-Gamma prior, you can ignore this argument::
from brainiak.reprsimil.brsa import BRSA
brsa = BRSA()
If you want to try the alternative half-Cauchy prior,
then you need to import it in addition to BRSA::
from brainiak.reprsimil.brsa import BRSA, prior_GP_var_half_cauchy
brsa = BRSA(tau2_prior=prior_GP_var_half_cauchy)
eta: float.
A small number added to the diagonal element of the
covariance matrix in the Gaussian Process prior. This is
to ensure that the matrix is invertible.
init_iter: int.
How many initial iterations to fit the model
without introducing the GP prior before fitting with it,
if GP_space or GP_inten is requested. This initial
fitting is to give the parameters a good starting point.
optimizer: str or callable.
The optimizer to use for minimizing cost function which
scipy.optimize.minimize can accept.
We use 'L-BFGS-B' as a default. Users can try other strings
corresponding to optimizer provided by scipy.optimize.minimize,
or a custom optimizer, such as 'BFGS' or 'CG'.
Note that BRSA fits a lot of parameters. So a chosen optimizer
should accept gradient (Jacobian) of the cost function. Otherwise
the fitting is likely to be unbearably slow. We do not calculate
Hessian of the objective function. So an optimizer which requires
Hessian cannot be used.
random_state : RandomState or an int seed.
A random number generator instance to define the state of
the random permutations generator whenever the module
needs to generate random number (e.g., initial parameter
of the Cholesky factor).
anneal_speed: float.
Annealing is introduced in fitting of the Cholesky
decomposition of the shared covariance matrix. The amount
of perturbation decays exponentially. This parameter sets
the ratio of the maximum number of iteration to the
time constant of the exponential.
anneal_speed=10 means that by n_iter/10 iterations,
the amount of perturbation is reduced by a factor of e (about 2.718).
minimize_options: dictionary.
Default: {'gtol': 1e-4, 'disp': False, 'maxiter': 6}
This is the dictionary passed as the options argument to
scipy.optimize.minimize, which minimizes the cost function during
fitting. Notice that the minimization is performed many times,
alternating between optimizing the covariance matrix U underlying
the pattern similarity matrix, and the SNR. This alternation is
performed at most n_iter times. So within each step of fitting,
the number of iterations performed by scipy.optimize.minimize does not
have to be very large. In other words, scipy.optimize.minimize does
not need to converge within each step of the alternating fitting
procedure.
tol: float.
Tolerance parameter passed to scipy.optimize.minimize. It is also
used for determining convergence of the alternating fitting
procedure.
Attributes
----------
U_ : numpy array, shape=[condition,condition].
The shared covariance matrix.
L_ : numpy array, shape=[condition,rank].
The Cholesky factor of the shared covariance matrix
(lower-triangular matrix).
C_: numpy array, shape=[condition,condition].
The correlation matrix derived from the shared covariance matrix.
This is the estimated similarity matrix between neural patterns
to your task conditions. Notice that it is recommended that
you also check U\\_, which is the covariance matrix underlying
this correlation matrix. In cases there is almost no response
to your task conditions, the diagonal values of U\\_ would become
very small and C\\_ might contain many correlation coefficients
close to 1 or -1. This might not reflect true strong correlation
or strong negative correlation, but a result of lack of
task-related neural activity, design matrix that does not match
true neural response, or not enough data.
It is also recommended to check nSNR\\_ after mapping it back to
the brain. A "reasonable" map should at least have higher values
in gray matter than in white matter.
nSNR_ : numpy array, shape=[voxels,].
The normalized pseudo-SNR of all voxels.
They are normalized such that the geometric mean is 1.
Note that this attribute can not be interpreted as true SNR,
but the relative ratios between voxels indicate the contribution
of each voxel to the representational similarity structure.
sigma_ : numpy array, shape=[voxels,].
The estimated standard deviation of the noise in each voxel.
Under the AR(1) model, this is the standard deviation
of the innovation noise.
rho_ : numpy array, shape=[voxels,].
The estimated autoregressive coefficient of each voxel
bGP_ : float, only if GP_space or GP_inten is True.
The standard deviation of the GP prior
lGPspace_ : float, only if GP_space or GP_inten is True
The length scale of Gaussian Process prior of log(SNR)
lGPinten_: float, only if GP_inten is True
The length scale in fMRI intensity of the GP prior of log(SNR)
beta_: array, shape=[conditions, voxels]
The maximum a posteriori estimate of the response amplitudes
of each voxel to each task condition.
beta0_: numpy array, shape=[n_nureg + n_base, voxels]
The loading weights of each voxel for the shared time courses
not captured by the design matrix. This helps capture the
structure of spatial covariance of task-unrelated signal.
n_base is the number of columns of the user-supplied nuisance
regressors plus one for DC component
X0_: numpy array, shape=[time_points, n_nureg + n_base]
The estimated time course that is shared across voxels but
unrelated to the events of interest (design matrix).
beta0_null_: numpy array, shape=[n_nureg + n_base, voxels]
The equivalent of beta0\\_ in a null model which does not
include the design matrix and response pattern beta.
X0_null_: numpy array, shape=[time_points, n_nureg + n_base]
The equivalent of X0\\_ in a null model which does not
include the design matrix and response pattern beta
n_nureg_: int
Number of nuisance regressor in addition to such
regressors provided by the user (if any), if auto_nuisance
is set to True. If n_nureg is set to 'opt',
this will be estimated from data. 'opt' will use M Gavish
and D Donoho's approximate estimation of optimal hard
threshold for singular values.
random_state_: `RandomState`
Random number generator initialized using random_state.
"""
def __init__(
self, n_iter=100, rank=None,
auto_nuisance=True, n_nureg=None, nureg_zscore=True,
nureg_method='PCA', baseline_single=False,
GP_space=False, GP_inten=False,
space_smooth_range=None, inten_smooth_range=None,
tau_range=5.0,
tau2_prior=prior_GP_var_inv_gamma,
eta=0.0001, init_iter=20, optimizer='L-BFGS-B',
random_state=None, anneal_speed=10, tol=1e-4,
minimize_options={'gtol': 1e-4, 'disp': False,
'maxiter': 6}):
self.n_iter = n_iter
self.rank = rank
self.GP_space = GP_space
self.GP_inten = GP_inten
self.tol = tol
self.auto_nuisance = auto_nuisance
self.n_nureg = n_nureg
self.nureg_zscore = nureg_zscore
if auto_nuisance:
assert (n_nureg is None) \
or (isinstance(n_nureg, int) and n_nureg > 0), \
'n_nureg should be a positive integer or None'\
' if auto_nuisance is True.'
if self.nureg_zscore:
self.preprocess_residual = lambda x: _zscore(x)
else:
self.preprocess_residual = lambda x: x
if nureg_method == 'FA':
self.nureg_method = lambda x: FactorAnalysis(n_components=x)
elif nureg_method == 'PCA':
self.nureg_method = lambda x: PCA(n_components=x, whiten=True)
elif nureg_method == 'SPCA':
self.nureg_method = lambda x: SparsePCA(n_components=x,
max_iter=20, tol=tol)
elif nureg_method == 'ICA':
self.nureg_method = lambda x: FastICA(n_components=x,
whiten=True)
else:
raise ValueError('nureg_method can only be FA, PCA, '
'SPCA(for sparse PCA) or ICA')
self.baseline_single = baseline_single
self.minimize_options = minimize_options
self.eta = eta
# This is a tiny ridge added to the Gaussian Process
# covariance matrix template to guarantee that it is invertible.
# Mathematically it means we assume that this proportion of the
# variance is always independent between voxels for the log(SNR2).
self.space_smooth_range = space_smooth_range
self.inten_smooth_range = inten_smooth_range
# The kernel of the Gaussian Process is the product of a kernel
# defined on spatial coordinate and a kernel defined on
# image intensity.
self.tau_range = tau_range
self.tau2_prior = tau2_prior
self.init_iter = init_iter
# When imposing smoothness prior, fit the model without this
# prior for this number of iterations.
self.optimizer = optimizer
self.random_state = random_state
self.anneal_speed = anneal_speed
return
def fit(self, X, design, nuisance=None, scan_onsets=None, coords=None,
inten=None):
"""Compute the Bayesian RSA
Parameters
----------
X: numpy array, shape=[time_points, voxels]
If you have multiple scans of the same participants that you
want to analyze together, you should concatenate them along
the time dimension after proper preprocessing (e.g. spatial
alignment), and specify the onsets of each scan in scan_onsets.
design: numpy array, shape=[time_points, conditions]
This is the design matrix. It should only include the hypothetic
response for task conditions. You should not include
regressors for a DC component or motion parameters, unless you
want to estimate their pattern similarity with response patterns
to your task conditions. If you want to model head motion,
you should include them in nuisance regressors.
If you have multiple runs, the design matrix
of all runs should be concatenated along the time dimension,
with every column for one condition across runs.
For example, if you have 3 experimental runs from one participant,
each lasting 200 TRs, and you have 4 conditions,
then design should be a 600 x 4 numpy array.
nuisance: optional, numpy array, shape=[time_points, nuisance_factors]
The responses to these regressors will be marginalized out from
each voxel, which means they are considered, but won't be assumed
to share the same pseudo-SNR map with the design matrix.
Therefore, the pseudo-SNR map will only reflect the
relative contribution of design matrix to each voxel.
You can provide time courses such as those for head motion
to this parameter.
Note that if auto_nuisance is set to True, the first
n_nureg principal components of residual (excluding the response
to the design matrix and the user-provided nuisance regressors
and a constant baseline)
will be included as additional nuisance regressor after the
first round of fitting.
If auto_nuisance is set to False, the nuisance regressors supplied
by the users together with DC components will be used as
nuisance time series.
Please do not include time course of constant baseline in nuisance.
scan_onsets: optional, numpy array, shape=[runs,]
This specifies the indices of X which correspond to the onset
of each scanning run. For example, if you have two experimental
runs of the same subject, each with 100 TRs, then scan_onsets
should be [0,100].
If you do not provide the argument, the program will
assume all data are from the same run.
The effect of them is to make the inverse matrix
of the temporal covariance matrix of noise block-diagonal.
coords: optional, numpy array, shape=[voxels,3]
This is the coordinate of each voxel,
used for implementing Gaussian Process prior.
inten: optional, numpy array, shape=[voxel,]
This is the average fMRI intensity in each voxel.
It should be calculated from your data without any preprocessing
such as z-scoring, because it should reflect
whether a voxel is bright (grey matter) or dark (white matter).
A Gaussian Process kernel defined on both coordinate and intensity
imposes a smoothness prior on adjacent voxels
of the same tissue type. The Gaussian Process
is experimental and has shown good performance on
some visual datasets.
"""
logger.info('Running Bayesian RSA')
self.random_state_ = check_random_state(self.random_state)
# setting random seed
logger.debug('RandState set to {}'.format(self.random_state_))
assert not self.GP_inten or (self.GP_inten and self.GP_space),\
'You must specify GP_space as True '\
'if you want to use GP_inten'
# Check input data
assert_all_finite(X)
assert X.ndim == 2, 'The data should be 2-dimensional ndarray'
assert np.all(np.std(X, axis=0) > 0),\
'The time courses of some voxels do not change at all.'\
' Please make sure all voxels are within the brain'
# check design matrix
assert_all_finite(design)
assert design.ndim == 2,\
'The design matrix should be 2-dimensional ndarray'
assert np.linalg.matrix_rank(design) == design.shape[1], \
'Your design matrix has rank smaller than the number of'\
' columns. Some columns can be explained by linear '\
'combination of other columns. Please check your design matrix.'
assert np.size(design, axis=0) == np.size(X, axis=0),\
'Design matrix and data do not '\
'have the same number of time points.'
assert self.rank is None or self.rank <= design.shape[1],\
'Your design matrix has fewer columns than the rank you set'
# Check the nuisance regressors.
if nuisance is not None:
assert_all_finite(nuisance)
assert nuisance.ndim == 2,\
'The nuisance regressor should be 2-dimensional ndarray'
assert np.linalg.matrix_rank(nuisance) == nuisance.shape[1], \
'The nuisance regressor has rank smaller than the number of '\
'columns. Some columns can be explained by linear '\
'combination of other columns. Please check your nuisance ' \
'regressors.'
assert np.size(nuisance, axis=0) == np.size(X, axis=0), \
'Nuisance regressor and data do not have the same '\
'number of time points.'
# check scan_onsets validity
assert scan_onsets is None or\
(np.max(scan_onsets) <= X.shape[0] and np.min(scan_onsets) >= 0),\
'Some scan onsets provided are out of the range of time points.'
# check the size of coords and inten
if self.GP_space:
logger.info('Fitting with Gaussian Process prior on log(SNR)')
assert coords is not None and coords.shape[0] == X.shape[1],\
'Spatial smoothness was requested by setting GP_space. '\
'But the voxel number of coords does not match that of '\
'data X, or voxel coordinates are not provided. '\
'Please make sure that coords is in the shape of '\
'[n_voxel x 3].'
assert coords.ndim == 2,\
'The coordinate matrix should be a 2-d array'
if self.GP_inten:
assert inten is not None and inten.shape[0] == X.shape[1],\
'The voxel number of intensity does not '\
'match that of data X, or intensity not provided.'
assert np.var(inten) > 0,\
'All voxels have the same intensity.'
if (not self.GP_space and coords is not None) or\
(not self.GP_inten and inten is not None):
logger.warning('Coordinates or image intensity provided'
' but GP_space or GP_inten is not set '
'to True. The coordinates or intensity are'
' ignored.')
# Estimate the number of necessary nuisance regressors
if self.auto_nuisance:
if self.n_nureg is None:
logger.info('number of nuisance regressors is determined '
'automatically.')
run_TRs, n_runs = self._run_TR_from_scan_onsets(
X.shape[0], scan_onsets)
ts_dc = self._gen_legendre(run_TRs, [0])
_, ts_base, _ = self._merge_DC_to_base(
ts_dc, nuisance, False)
ts_reg = np.concatenate((ts_base, design), axis=1)
beta_hat = np.linalg.lstsq(ts_reg, X, rcond=None)[0]
residuals = X - np.dot(ts_reg, beta_hat)
self.n_nureg_ = np.max(
[1, Ncomp_SVHT_MG_DLD_approx(residuals,
self.nureg_zscore)])
logger.info('Use {} nuisance regressors to model the spatial '
'correlation in noise.'.format(self.n_nureg_))
self.n_nureg_ = np.int32(self.n_nureg_)
else:
self.n_nureg_ = self.n_nureg
self.n_nureg_ = np.int32(self.n_nureg_)
# Run Bayesian RSA
# Note that we have a change of notation here. Within _fit_RSA_UV,
# design matrix is named X and data is named Y, to reflect the
# generative model that data Y is generated by mixing the response
# X to experiment conditions and other neural activity.
# However, in fit(), we keep the tradition of scikit-learn that
# X is the input data to fit and y, a reserved name not used, is
# the label to map to from X.
if not self.GP_space:
# If GP_space is not requested, then the model is fitted
# without imposing any Gaussian Process prior on log(SNR^2)
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, _, _, _,\
self.X0_ = self._fit_RSA_UV(X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets)
elif not self.GP_inten:
# If GP_space is requested, but GP_inten is not, a GP prior
# based on spatial locations of voxels will be imposed.
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, \
self.lGPspace_, self.bGP_, _, \
self.X0_ = self._fit_RSA_UV(
X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets, coords=coords)
else:
# If both self.GP_space and self.GP_inten are True,
# a GP prior based on both location and intensity is imposed.
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, \
self.lGPspace_, self.bGP_, self.lGPinten_, self.X0_ = \
self._fit_RSA_UV(X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets,
coords=coords, inten=inten)
self.C_ = utils.cov2corr(self.U_)
self.design_ = design.copy()
self._rho_design_, self._sigma2_design_ = \
self._est_AR1(self.design_, same_para=True)
self._rho_X0_, self._sigma2_X0_ = self._est_AR1(self.X0_)
# AR(1) parameters of the design matrix and nuisance regressors,
# which will be used in transform or score.
# Finally, we fit a null model with the same setting except
# that there is no response to X
self.beta0_null_, self.sigma_null_, self.rho_null_, \
self.X0_null_ = self._fit_null(Y=X, X_base=nuisance,
scan_onsets=scan_onsets)
self._rho_X0_null_, self._sigma2_X0_null_ =\
self._est_AR1(self.X0_null_)
return self
def transform(self, X, y=None, scan_onsets=None):
""" Use the model to estimate the time course of response to
each condition (ts), and the time course unrelated to task
(ts0) which is spread across the brain.
This is equivalent to "decoding" the design matrix and
nuisance regressors from a new dataset different from the
training dataset on which fit() was applied. An AR(1) smooth
prior is imposed on the decoded ts and ts0 with the AR(1)
parameters learnt from the corresponding time courses in the
training data.
Notice: if you set the rank to be lower than the number of
experimental conditions (number of columns in the design
matrix), the recovered task-related activity will have
collinearity (the recovered time courses of some conditions
can be linearly explained by the recovered time courses
of other conditions).
Parameters
----------
X : numpy arrays, shape=[time_points, voxels]
fMRI data of new data of the same subject. The voxels should
match those used in the fit() function. If data are z-scored
(recommended) when fitting the model, data should be z-scored
as well when calling transform()
y : not used (as it is unsupervised learning)
scan_onsets : numpy array, shape=[number of runs].
A list of indices corresponding to the onsets of
scans in the data X. If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ts : numpy arrays, shape = [time_points, condition]
The estimated response to the task conditions which have the
response amplitudes estimated during the fit step.
ts0: numpy array, shape = [time_points, n_nureg]
The estimated time course spread across the brain, with the
loading weights estimated during the fit step.
"""
assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \
'The shape of X is not consistent with the shape of data '\
'used in the fitting step. They should have the same number '\
'of voxels'
assert scan_onsets is None or (scan_onsets.ndim == 1 and
0 in scan_onsets), \
'scan_onsets should either be None or an array of indices. '\
'If it is given, it should include at least 0'
if scan_onsets is None:
scan_onsets = np.array([0], dtype=int)
else:
scan_onsets = np.int32(scan_onsets)
ts, ts0, log_p = self._transform(
Y=X, scan_onsets=scan_onsets, beta=self.beta_,
beta0=self.beta0_, rho_e=self.rho_, sigma_e=self.sigma_,
rho_X=self._rho_design_, sigma2_X=self._sigma2_design_,
rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_)
return ts, ts0
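# Usage sketch for transform (hypothetical variable names; `new_data` is a
# [time_points, voxels] array from the same voxels used in fit()):
#   ts, ts0 = brsa.transform(new_data, scan_onsets=new_scan_onsets)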
def score(self, X, design, scan_onsets=None):
""" Use the model and parameters estimated by fit function
from some data of a participant to evaluate the log
likelihood of some new data of the same participant.
Design matrix of the same set of experimental
conditions in the testing data should be provided, with each
column corresponding to the same condition as that column
in the design matrix of the training data.
Unknown nuisance time series will be marginalized, assuming
they follow the same spatial pattern as in the training
data. The hypothetical response captured by the design matrix
will be subtracted from data before the marginalization
when evaluating the log likelihood. For null model,
nothing will be subtracted before marginalization.
There is a difference between the form of likelihood function
used in fit() and score(). In fit(), the response amplitude
beta to design matrix X and the modulation beta0 by nuisance
regressor X0 are both marginalized, with X provided and X0
estimated from data. In score(), posterior estimation of
beta and beta0 from the fitting step are assumed unchanged
to testing data and X0 is marginalized.
The logic underlying score() is to transfer
as much as what we can learn from training data when
calculating a likelihood score for testing data.
If you z-scored your data during fit step, you should
z-score them for score function as well. If you did not
z-score in fitting, you should not z-score here either.
Parameters
----------
X : numpy arrays, shape=[time_points, voxels]
fMRI data of new data of the same subject. The voxels should
match those used in the fit() function. If data are z-scored
(recommended) when fitting the model, data should be z-scored
as well when calling transform()
design : numpy array, shape=[time_points, conditions]
Design matrix expressing the hypothetical response of
the task conditions in data X.
scan_onsets : numpy array, shape=[number of runs].
A list of indices corresponding to the onsets of
scans in the data X. If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ll: float.
The log likelihood of the new data based on the model and its
parameters fit to the training data.
ll_null: float.
The log likelihood of the new data based on a null model
which assumes the same as the full model for everything
except for that there is no response to any of the
task conditions.
"""
assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \
'The shape of X is not consistent with the shape of data '\
'used in the fitting step. They should have the same number '\
'of voxels'
assert scan_onsets is None or (scan_onsets.ndim == 1 and
0 in scan_onsets), \
'scan_onsets should either be None or an array of indices. '\
'If it is given, it should include at least 0'
if scan_onsets is None:
scan_onsets = np.array([0], dtype=int)
else:
scan_onsets = np.int32(scan_onsets)
ll = self._score(Y=X, design=design, beta=self.beta_,
scan_onsets=scan_onsets, beta0=self.beta0_,
rho_e=self.rho_, sigma_e=self.sigma_,
rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_)
ll_null = self._score(Y=X, design=None, beta=None,
scan_onsets=scan_onsets, beta0=self.beta0_,
rho_e=self.rho_, sigma_e=self.sigma_,
rho_X0=self._rho_X0_,
sigma2_X0=self._sigma2_X0_)
return ll, ll_null
# The following 2 functions _D_gen and _F_gen generate templates used
# for constructing inverse of covariance matrix of AR(1) noise
# The inverse of the covariance matrix is
# (I - rho1 * D + rho1**2 * F) / sigma**2. D is a matrix where all the
# elements adjacent to the diagonal are 1 and all others are 0. F is
# a matrix which is 1 on all diagonal elements except for in the first
# and last columns. We denote (I - rho1 * D + rho1**2 * F) with A.
# In the function calculating likelihood function,
# XTAX, YTAY_diag, YTAX all mean multiplying the inverse covariance matrix
# in between either the design matrix or the data.
# As one can see, even though rho1 and sigma2 might update as we keep
# fitting parameters, several terms stay unchanged and do not need to
# be re-calculated.
# For example, in X'AX = X'(I - rho1*D + rho1**2*F)X / sigma2,
# the products X'X, X'DX, X'FX, etc. can always be re-used if they
# are pre-calculated. Therefore, _D_gen and _F_gen constructs matrices
# D and F, and _prepare_data_* calculates these products that can be
# re-used. In principle, once parameters have been fitted for a
# dataset, they can be updated for new incoming data by adding the
# products X'X, X'DX, X'FX, X'Y etc. from new data to those from
# existing data, and refit the parameters starting from the ones
# fitted from existing data.
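# As a concrete illustration, for a single run of 4 time points the
# templates below evaluate to
#   D = [[0,1,0,0],        F = [[0,0,0,0],
#        [1,0,1,0],             [0,1,0,0],
#        [0,1,0,1],             [0,0,1,0],
#        [0,0,1,0]],            [0,0,0,0]]
# so A = I - rho1*D + rho1**2*F is the usual tridiagonal AR(1) precision
# matrix (up to the 1/sigma**2 factor).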
def _D_gen(self, TR):
if TR > 0:
return np.diag(np.ones(TR - 1), -1) \
+ np.diag(np.ones(TR - 1), 1)
else:
return np.empty([0, 0])
def _F_gen(self, TR):
if TR > 0:
F = np.eye(TR)
F[0, 0] = 0
F[TR - 1, TR - 1] = 0
return F
else:
return np.empty([0, 0])
def _run_TR_from_scan_onsets(self, n_T, scan_onsets=None):
if scan_onsets is None:
# assume that all data are acquired within the same scan.
n_run = 1
run_TRs = np.array([n_T], dtype=int)
else:
# Each value in the scan_onsets tells the index at which
# a new scan starts. For example, if n_T = 500, and
# scan_onsets = [0,100,200,400], this means that the time points
# of 0-99 are from the first scan, 100-199 are from the second,
# 200-399 are from the third and 400-499 are from the fourth
run_TRs = np.int32(np.diff(np.append(scan_onsets, n_T)))
run_TRs = np.delete(run_TRs, np.where(run_TRs == 0))
n_run = run_TRs.size
# delete run length of 0 in case of duplication in scan_onsets.
logger.info('I infer that the number of volumes'
' in each scan are: {}'.format(run_TRs))
return run_TRs, n_run
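# For the example given in the comment above (n_T = 500 and
# scan_onsets = [0, 100, 200, 400]), this returns
# run_TRs = [100, 100, 200, 100] and n_run = 4.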
def _prepare_DF(self, n_T, scan_onsets=None):
""" Prepare the essential template matrices D and F for
pre-calculating some terms to be re-used.
The inverse covariance matrix of AR(1) noise is
sigma^-2 * (I - rho1*D + rho1**2 * F).
And we denote A = I - rho1*D + rho1**2 * F"""
run_TRs, n_run = self._run_TR_from_scan_onsets(n_T, scan_onsets)
D_ele = map(self._D_gen, run_TRs)
F_ele = map(self._F_gen, run_TRs)
D = scipy.linalg.block_diag(*D_ele)
F = scipy.linalg.block_diag(*F_ele)
# D and F above are templates for constructing
# the inverse of temporal covariance matrix of noise
return D, F, run_TRs, n_run
def _prepare_data_XY(self, X, Y, D, F):
"""Prepares different forms of products of design matrix X
and data Y, or between themselves.
These products are re-used a lot during fitting.
So we pre-calculate them. Because these are reused,
it is in principle possible to update the fitting
as new data come in, by just incrementally adding
the products of new data and their corresponding parts
of design matrix to these pre-calculated terms.
"""
XTY, XTDY, XTFY = self._make_templates(D, F, X, Y)
YTY_diag = np.sum(Y * Y, axis=0)
YTDY_diag = np.sum(Y * np.dot(D, Y), axis=0)
YTFY_diag = np.sum(Y * np.dot(F, Y), axis=0)
XTX, XTDX, XTFX = self._make_templates(D, F, X, X)
return XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX, \
XTDX, XTFX
def _gen_X_DC(self, run_TRs):
if self.baseline_single:
X_DC = np.ones((np.sum(run_TRs), 1))
else:
X_DC = scipy.linalg.block_diag(*map(np.ones, run_TRs)).T
return X_DC
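# For example, with run_TRs = [3, 2] and baseline_single=False, X_DC is the
# 5 x 2 matrix
#   [[1, 0],
#    [1, 0],
#    [1, 0],
#    [0, 1],
#    [0, 1]]
# i.e. one constant (DC) regressor per run; with baseline_single=True it
# would be a single column of ones.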
def _gen_legendre(self, run_TRs, orders):
def reg(x):
return np.concatenate(
[scipy.special.legendre(o)(np.linspace(-1, 1, x))[None, :]
for o in orders], axis=0)
reg_poly = scipy.linalg.block_diag(
*map(reg, run_TRs)).T
return reg_poly
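# For example, _gen_legendre(run_TRs, [0]) yields one constant regressor per
# run (the 0th-order Legendre polynomial is 1 everywhere), which is how the
# DC components are generated when estimating the number of nuisance
# regressors in fit().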
def _prepare_data_XYX0(self, X, Y, X_base, X_res, D, F, run_TRs,
no_DC=False):
"""Prepares different forms of products between design matrix X or
data Y or nuisance regressors X0.
These products are re-used a lot during fitting.
So we pre-calculate them.
no_DC means not inserting regressors for DC components
into nuisance regressor.
It will only take effect if X_base is not None.
"""
X_DC = self._gen_X_DC(run_TRs)
reg_sol = np.linalg.lstsq(X_DC, X, rcond=None)
if np.any(np.isclose(reg_sol[1], 0)):
raise ValueError('Your design matrix appears to have '
'included baseline time series. '
'Either remove them, or move them to'
' nuisance regressors.')
X_DC, X_base, idx_DC = self._merge_DC_to_base(X_DC, X_base,
no_DC)
if X_res is None:
X0 = X_base
else:
X0 = np.concatenate((X_base, X_res), axis=1)
n_X0 = X0.shape[1]
X0TX0, X0TDX0, X0TFX0 = self._make_templates(D, F, X0, X0)
XTX0, XTDX0, XTFX0 = self._make_templates(D, F, X, X0)
X0TY, X0TDY, X0TFY = self._make_templates(D, F, X0, Y)
return X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, idx_DC
def _merge_DC_to_base(self, X_DC, X_base, no_DC):
""" Merge DC components X_DC to the baseline time series
X_base (By baseline, this means any fixed nuisance
regressors not updated during fitting, including DC
components and any nuisance regressors provided by
the user.)
X_DC is always in the first few columns of X_base.
"""
if X_base is not None:
reg_sol = np.linalg.lstsq(X_DC, X_base, rcond=None)
if not no_DC:
if not np.any(np.isclose(reg_sol[1], 0)):
# No columns in X_base can be explained by the
# baseline regressors. So we insert them.
X_base = np.concatenate((X_DC, X_base), axis=1)
idx_DC = np.arange(0, X_DC.shape[1])
else:
logger.warning('Provided regressors for uninteresting '
'time series already include baseline. '
'No additional baseline is inserted.')
idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]
else:
idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]
else:
# If a set of regressors for non-interested signals is not
# provided, then we simply include one baseline for each run.
X_base = X_DC
idx_DC = np.arange(0, X_base.shape[1])
logger.info('You did not provide time series of no interest '
'such as DC component. Trivial regressors of'
' DC component are included for further modeling.'
" The final covariance matrix won't "
'reflect these components.')
return X_DC, X_base, idx_DC
def _make_ar1_quad_form(self, XTX, XTDX, XTFX, rho1):
# Calculate the matrix X'AX = X'X - rho1 * X'DX + rho1^2 * X'FX
# Here, rho1 is the AR(1) coefficient. X is a matrix of time series
# with each row corresponding to a vector at one
# time point. The forms of matrices D and F are defined in _prepare_DF
# function. sigma^-2 * A would be the inverse of covariance matrix
# of AR(1) process (precision matrix) with rho1 as the AR coefficient
# and sigma^2 as the variance of independent noise at each time point.
return XTX - rho1 * XTDX + rho1**2 * XTFX
def _make_ar1_quad_form_grad(self, XTDX, XTFX, rho1):
# Calculate the derivative of the quadratic form X'AX with respect to
# AR1 coefficient rho1, given precalculated terms X'DX and X'FX,
# and rho1.
return - XTDX + 2 * rho1 * XTFX
def _make_templates(self, D, F, X, Y):
XTY = np.dot(X.T, Y)
XTDY = np.dot(np.dot(X.T, D), Y)
XTFY = np.dot(np.dot(X.T, F), Y)
return XTY, XTDY, XTFY
def _precompute_ar1_quad_forms(self, XTY, XTDY, XTFY, YTY_diag, YTDY_diag,
YTFY_diag, XTX, XTDX, XTFX, X0TX0, X0TDX0,
X0TFX0, XTX0, XTDX0, XTFX0, X0TY, X0TDY,
X0TFY, L, rho1, n_V, n_X0):
# Calculate the sandwich terms which put A between X, Y and X0
# These terms are used a lot in the likelihood. But in the _fitV
# step, they only need to be calculated once, since A is fixed.
# In _fitU step, they need to be calculated at each iteration,
# because rho1 changes.
XTAY = self._make_ar1_quad_form(XTY, XTDY, XTFY, rho1)
# dimension: feature*space
YTAY = self._make_ar1_quad_form(YTY_diag, YTDY_diag, YTFY_diag, rho1)
# dimension: space,
# A/sigma2 is the inverse of noise covariance matrix in each voxel.
# YTAY means Y'AY
XTAX = XTX[None, :, :] - rho1[:, None, None] \
* XTDX[None, :, :] \
+ rho1[:, None, None]**2 * XTFX[None, :, :]
# dimension: space*feature*feature
X0TAX0 = X0TX0[None, :, :] - rho1[:, None, None] \
* X0TDX0[None, :, :] \
+ rho1[:, None, None]**2 * X0TFX0[None, :, :]
# dimension: space*#baseline*#baseline
XTAX0 = XTX0[None, :, :] - rho1[:, None, None] \
* XTDX0[None, :, :] \
+ rho1[:, None, None]**2 * XTFX0[None, :, :]
# dimension: space*feature*#baseline
X0TAY = self._make_ar1_quad_form(X0TY, X0TDY, X0TFY, rho1)
# dimension: #baseline*space
X0TAX0_i = np.linalg.solve(X0TAX0, np.identity(n_X0)[None, :, :])
# dimension: space*#baseline*#baseline
XTAcorrX = XTAX
# dimension: space*feature*feature
XTAcorrY = XTAY
# dimension: feature*space
for i_v in range(n_V):
XTAcorrX[i_v, :, :] -= \
np.dot(np.dot(XTAX0[i_v, :, :], X0TAX0_i[i_v, :, :]),
XTAX0[i_v, :, :].T)
XTAcorrY[:, i_v] -= np.dot(np.dot(XTAX0[i_v, :, :],
X0TAX0_i[i_v, :, :]),
X0TAY[:, i_v])
XTAcorrXL = np.dot(XTAcorrX, L)
# dimension: space*feature*rank
LTXTAcorrXL = np.tensordot(XTAcorrXL, L, axes=(1, 0))
# dimension: rank*feature*rank
LTXTAcorrY = np.dot(L.T, XTAcorrY)
# dimension: rank*space
YTAcorrY = YTAY - np.sum(X0TAY * np.einsum('ijk,ki->ji',
X0TAX0_i, X0TAY), axis=0)
# dimension: space
return X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
XTAcorrX, XTAcorrY, YTAcorrY, LTXTAcorrY, XTAcorrXL, LTXTAcorrXL
def _calc_LL(self, rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY, X0TAX0, SNR2,
n_V, n_T, n_run, rank, n_X0):
# Calculate the log likelihood (excluding the GP prior of log(SNR))
# for both _loglike_AR1_diagV_fitU and _loglike_AR1_diagV_fitV,
# in addition to a few other terms.
LAMBDA_i = LTXTAcorrXL * SNR2[:, None, None] + np.eye(rank)
# dimension: space*rank*rank
LAMBDA = np.linalg.solve(LAMBDA_i, np.identity(rank)[None, :, :])
# dimension: space*rank*rank
# LAMBDA is essentially the inverse covariance matrix of the
# posterior probability of alpha, which bears the relation with
# beta by beta = L * alpha. L is the Cholesky factor of the
# shared covariance matrix U. Refer to the explanation below
# Equation 5 in the NIPS paper.
YTAcorrXL_LAMBDA = np.einsum('ji,ijk->ik', LTXTAcorrY, LAMBDA)
# dimension: space*rank
sigma2 = (YTAcorrY - np.sum(LTXTAcorrY * YTAcorrXL_LAMBDA.T, axis=0)
* SNR2) / (n_T - n_X0)
# dimension: space
LL = - np.sum(np.log(sigma2)) * (n_T - n_X0) * 0.5 \
+ np.sum(np.log(1 - rho1**2)) * n_run * 0.5 \
- np.sum(self._half_log_det(X0TAX0)) \
- np.sum(self._half_log_det(LAMBDA_i)) \
- (n_T - n_X0) * n_V * (1 + np.log(2 * np.pi)) * 0.5
# Log likelihood
return LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, sigma2
def _calc_dist2_GP(self, coords=None, inten=None,
GP_space=False, GP_inten=False):
# calculate the square of difference between each voxel's location
# coordinates and image intensity.
if GP_space:
assert coords is not None, 'coordinate is not provided'
# square of spatial distance between every two voxels
dist2 = spdist.squareform(spdist.pdist(coords, 'sqeuclidean'))
# set the hyperparameter for the GP process:
if self.space_smooth_range is None:
space_smooth_range = np.max(dist2)**0.5 / 2.0
# By default, we assume the length scale should be
# within half the size of ROI.
else:
space_smooth_range = self.space_smooth_range
if GP_inten:
assert inten is not None, 'intensity is not provided'
# square of difference between intensities of
# every two voxels
inten_diff2 = spdist.squareform(
spdist.pdist(inten[:, None], 'sqeuclidean'))
# set the hyperparameter for the GP process:
if self.inten_smooth_range is None:
inten_smooth_range = np.max(inten_diff2)**0.5 / 2.0
# By default, we assume the length scale should be
# within half the maximum difference of intensity.
else:
inten_smooth_range = self.inten_smooth_range
n_smooth = 2
else:
inten_diff2 = None
inten_smooth_range = None
n_smooth = 1
else:
n_smooth = 0
dist2 = None
inten_diff2 = None
GP_inten = False
space_smooth_range = None
inten_smooth_range = None
return dist2, inten_diff2, space_smooth_range, inten_smooth_range,\
n_smooth
def _build_index_param(self, n_l, n_V, n_smooth):
""" Build dictionaries to retrieve each parameter
from the combined parameters.
"""
idx_param_sing = {'Cholesky': np.arange(n_l), 'a1': n_l}
# for simplified fitting
idx_param_fitU = {'Cholesky': np.arange(n_l),
'a1': np.arange(n_l, n_l + n_V)}
# for the likelihood function when we fit U (the shared covariance).
idx_param_fitV = {'log_SNR2': np.arange(n_V - 1),
'c_space': n_V - 1, 'c_inten': n_V,
'c_both': np.arange(n_V - 1, n_V - 1 + n_smooth)}
# for the likelihood function when we fit V (reflected by SNR of
# each voxel)
return idx_param_sing, idx_param_fitU, idx_param_fitV
def _half_log_det(self, M):
""" Return log(|M|)*0.5. For positive definite matrix M
of more than 2 dimensions, calculate this for the
last two dimension and return a value corresponding
to each element in the first few dimensions.
"""
chol = np.linalg.cholesky(M)
if M.ndim == 2:
return np.sum(np.log(np.abs(np.diag(chol))))
else:
return np.sum(np.log(np.abs(np.diagonal(
chol, axis1=-2, axis2=-1))), axis=-1)
def _chol_idx(self, n_C, rank):
l_idx = np.tril_indices(n_C)
if rank is not None:
# The rank of covariance matrix is specified
idx_rank = np.where(l_idx[1] < rank)
l_idx = (l_idx[0][idx_rank], l_idx[1][idx_rank])
logger.info('Using the rank specified by the user: '
'{}'.format(rank))
else:
rank = n_C
# if not specified, we assume you want to
# estimate a full rank matrix
logger.warning('Please be aware that you did not specify the'
' rank of covariance matrix to estimate. '
'I will assume that the covariance matrix '
'shared among voxels is of full rank. '
'Rank = {}'.format(rank))
logger.warning('Please be aware that estimating a matrix of '
'high rank can be very slow.'
'If you have a good reason to specify a rank '
'lower than the number of experiment conditions,'
' do so.')
return l_idx, rank
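# Example of the index produced above (hypothetical n_C=4, rank=2):
#   import numpy as np
#   l_idx = np.tril_indices(4)
#   keep = np.where(l_idx[1] < 2)
#   l_idx = (l_idx[0][keep], l_idx[1][keep])
#   # l_idx picks the 7 entries in the first two columns of the lower
#   # triangle, so L = np.zeros((4, 2)); L[l_idx] = params reconstructs
#   # the Cholesky factor of a rank-2 covariance matrix U = L @ L.T.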
def _fit_RSA_UV(self, X, Y, X_base,
scan_onsets=None, coords=None, inten=None):
""" The major utility of fitting Bayesian RSA.
Note that there is a naming change of variable. X in fit()
is changed to Y here, and design in fit() is changed to X here.
This is because we follow the tradition that X expresses the
variable defined (controlled) by the experimenter, i.e., the
time course of experimental conditions convolved by an HRF,
and Y expresses data.
However, in the wrapper function fit(), we follow the naming
convention of scikit-learn.
"""
GP_inten = self.GP_inten
GP_space = self.GP_space
rank = self.rank
n_V = np.size(Y, axis=1)
n_T = np.size(Y, axis=0)
n_C = np.size(X, axis=1)
l_idx, rank = self._chol_idx(n_C, rank)
n_l = np.size(l_idx[0]) # the number of parameters for L
t_start = time.time()
D, F, run_TRs, n_run = self._prepare_DF(
n_T, scan_onsets=scan_onsets)
XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX, \
XTDX, XTFX = self._prepare_data_XY(X, Y, D, F)
X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, idx_DC = \
self._prepare_data_XYX0(
X, Y, X_base, None, D, F, run_TRs, no_DC=False)
# Prepare the data for fitting. These pre-calculated matrices
# will be re-used a lot in evaluating likelihood function and
# gradient.
# DC component will be added to the nuisance regressors.
# In later steps, we do not need to add DC components again
dist2, inten_diff2, space_smooth_range, inten_smooth_range,\
n_smooth = self._calc_dist2_GP(
coords=coords, inten=inten,
GP_space=GP_space, GP_inten=GP_inten)
# Calculating the distances between voxel locations and between
# voxel intensities. These are used if a Gaussian Process prior
# is requested to regularize log(SNR^2)
idx_param_sing, idx_param_fitU, idx_param_fitV = \
self._build_index_param(n_l, n_V, n_smooth)
# Indexes to find each parameter in a combined parameter vector.
current_GP = np.zeros(n_smooth)
# We will perform the fitting in 2~3 steps:
# (1) A preliminary fitting assuming all voxels share
# exactly the same temporal covariance matrix for their noise.
# SNR is assumed to be 1 for all voxels in this fitting.
# Therefore, there are only n_l+2 free parameters.
# (2) (optional) A fitting which allows each voxel to have their
# own pseudo-SNR and AR(1) coefficients. But no Gaussian Process
# prior is imposed on log(SNR). This step is neglected if GP
# prior is not requested. This step allows the SNR parameters to
# move closer to their correct values before GP is introduced.
# This step alternately fits the shared covariance and voxel-
# specific variance. It fits for init_iter steps and the
# tolerance is also increased by a factor of 5 to speed up
# fitting.
# (3) Final fitting. If GP prior is requested, it will be
# introduced in this step. Otherwise, just fit as the previous
# step, but using un-altered tolerance setting, and n_iter
# as the number of iteration.
# Step 1 fitting, with a simplified model
current_vec_U_chlsk_l, current_a1, current_logSigma2 = \
self._initial_fit_singpara(
XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
X, Y, X0, idx_param_sing,
l_idx, n_C, n_T, n_V, n_l, n_run, n_X0, rank)
current_logSNR2 = -current_logSigma2
norm_factor = np.mean(current_logSNR2)
current_logSNR2 = current_logSNR2 - norm_factor
X_res = None
# Step 2 fitting, which only happens if
# GP prior is requested
if GP_space:
current_vec_U_chlsk_l, current_a1, current_logSNR2, X_res\
= self._fit_diagV_noGP(
XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X, Y, X_base, X_res, D, F, run_TRs,
current_vec_U_chlsk_l,
current_a1, current_logSNR2,
idx_param_fitU, idx_param_fitV,
l_idx, n_C, n_T, n_V, n_l, n_run, n_X0, rank)
current_GP[0] = np.log(np.min(
dist2[np.tril_indices_from(dist2, k=-1)]))
# We start fitting the model with GP prior with a small
# length scale: the size of voxels.
# Alternatively, initialize with a large distance.
# Further testing of initial parameters needs to be done.
# current_GP[0] = np.log(np.max(dist2)/4.0)
logger.debug('current GP[0]:{}'.format(current_GP[0]))
if GP_inten:
current_GP[1] = np.log(np.maximum(
np.percentile(inten_diff2[np.tril_indices_from(
inten_diff2, k=-1)], 2), 0.5))
logger.debug(
'current GP[1]:{}'.format(current_GP[1]))
# We start the length scale for intensity with
# a small value. A heuristic is the 2nd percentile of
# all the squared differences. But it should not be
# smaller than 0.5. This limit is set in case
# many voxels have close to equal intensities,
# which might render the 2nd percentile 0.
# Step 3 fitting. GP prior is imposed if requested.
# In this step, unless auto_nuisance is set to False, X_res
# will be re-estimated from the residuals after each step
# of fitting. And X0 will be concatenation of X_base and X_res
logger.debug('indexing:{}'.format(idx_param_fitV))
logger.debug('initial GP parameters:{}'.format(current_GP))
current_vec_U_chlsk_l, current_a1, current_logSNR2,\
current_GP, X_res = self._fit_diagV_GP(
XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X, Y, X_base, X_res, D, F, run_TRs,
current_vec_U_chlsk_l,
current_a1, current_logSNR2, current_GP, n_smooth,
idx_param_fitU, idx_param_fitV,
l_idx, n_C, n_T, n_V, n_l, n_run, n_X0, rank,
GP_space, GP_inten, dist2, inten_diff2,
space_smooth_range, inten_smooth_range)
estU_chlsk_l_AR1_UV = np.zeros([n_C, rank])
estU_chlsk_l_AR1_UV[l_idx] = current_vec_U_chlsk_l
est_cov_AR1_UV = np.dot(estU_chlsk_l_AR1_UV, estU_chlsk_l_AR1_UV.T)
est_rho1_AR1_UV = 2 / np.pi * np.arctan(current_a1)
est_SNR_AR1_UV = np.exp(current_logSNR2 / 2.0)
# Calculating est_sigma_AR1_UV, est_sigma_AR1_UV,
# est_beta_AR1_UV and est_beta0_AR1_UV
X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, _ \
= self._prepare_data_XYX0(
X, Y, X_base, X_res, D, F, run_TRs, no_DC=True)
X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
XTAcorrX, XTAcorrY, YTAcorrY, LTXTAcorrY, XTAcorrXL, LTXTAcorrXL\
= self._precompute_ar1_quad_forms(XTY, XTDY, XTFY,
YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X0TX0, X0TDX0,
X0TFX0, XTX0, XTDX0, XTFX0, X0TY,
X0TDY, X0TFY,
estU_chlsk_l_AR1_UV,
est_rho1_AR1_UV, n_V, n_X0)
LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, sigma2 \
= self._calc_LL(est_rho1_AR1_UV, LTXTAcorrXL, LTXTAcorrY, YTAcorrY,
X0TAX0, est_SNR_AR1_UV**2,
n_V, n_T, n_run, rank, n_X0)
est_sigma_AR1_UV = sigma2**0.5
est_beta_AR1_UV = est_SNR_AR1_UV**2 \
* np.dot(estU_chlsk_l_AR1_UV, YTAcorrXL_LAMBDA.T)
est_beta_AR1_UV_latent = \
est_SNR_AR1_UV**2 * YTAcorrXL_LAMBDA.T
# the latent term means that X*L multiplied by this term
# is the same as X*beta. This will be used for decoding
# and cross-validating, in case L is low-rank
est_beta0_AR1_UV = np.einsum(
'ijk,ki->ji', X0TAX0_i,
(X0TAY - np.einsum('ikj,ki->ji', XTAX0, est_beta_AR1_UV)))
# Now we want to collapse all beta0 corresponding to DC components
# of different runs to a single map, and preserve only one DC component
# across runs. This is because they should express the same component
# and the new data to transform do not necessarily have the same
# numbers of runs as the training data.
if idx_DC.size > 1:
collapsed_DC = np.sum(X0[:, idx_DC], axis=1)
X0 = np.insert(np.delete(X0, idx_DC, axis=1), 0,
collapsed_DC, axis=1)
collapsed_beta0 = np.mean(est_beta0_AR1_UV[idx_DC, :], axis=0)
est_beta0_AR1_UV = np.insert(
np.delete(est_beta0_AR1_UV, idx_DC, axis=0),
0, collapsed_beta0, axis=0)
t_finish = time.time()
logger.info(
'total time of fitting: {} seconds'.format(t_finish - t_start))
logger.debug('final GP parameters:{}'.format(current_GP))
if GP_space:
est_space_smooth_r = np.exp(current_GP[0] / 2.0)
if GP_inten:
est_intensity_kernel_r = np.exp(current_GP[1] / 2.0)
K_major = np.exp(- (dist2 / est_space_smooth_r**2 +
inten_diff2 / est_intensity_kernel_r**2)
/ 2.0)
else:
est_intensity_kernel_r = None
K_major = np.exp(- dist2 / est_space_smooth_r**2 / 2.0)
K = K_major + np.diag(np.ones(n_V) * self.eta)
invK_tilde_log_SNR = np.linalg.solve(K, current_logSNR2) / 2
log_SNR_invK_tilde_log_SNR = np.dot(current_logSNR2,
invK_tilde_log_SNR) / 2
tau2, _ = self.tau2_prior(log_SNR_invK_tilde_log_SNR, n_V,
self.tau_range)
est_std_log_SNR = tau2 ** 0.5
else:
est_space_smooth_r = None
est_intensity_kernel_r = None
est_std_log_SNR = None
return est_cov_AR1_UV, estU_chlsk_l_AR1_UV, est_SNR_AR1_UV, \
est_beta_AR1_UV, est_beta0_AR1_UV, est_beta_AR1_UV_latent,\
est_sigma_AR1_UV, est_rho1_AR1_UV, est_space_smooth_r, \
est_std_log_SNR, est_intensity_kernel_r, X0
def _transform(self, Y, scan_onsets, beta, beta0,
rho_e, sigma_e, rho_X, sigma2_X, rho_X0, sigma2_X0):
""" Given the data Y and the response amplitudes beta and beta0
estimated in the fit step, estimate the corresponding X and X0.
It is done by a forward-backward algorithm.
We assume X and X0 both are vector autoregressive (VAR)
processes, to capture temporal smoothness. Their VAR
parameters are estimated from training data at the fit stage.
"""
logger.info('Transforming new data.')
# Constructing the transition matrix and the variance of
# innovation noise as prior for the latent variable X and X0
# in new data.
n_C = beta.shape[0]
n_T = Y.shape[0]
weight = np.concatenate((beta, beta0), axis=0)
T_X = np.diag(np.concatenate((rho_X, rho_X0)))
Var_X = np.concatenate((sigma2_X / (1 - rho_X**2),
sigma2_X0 / (1 - rho_X0**2)))
Var_dX = np.concatenate((sigma2_X, sigma2_X0))
sigma2_e = sigma_e ** 2
scan_onsets = np.setdiff1d(scan_onsets, n_T)
n_scan = scan_onsets.size
X = [None] * scan_onsets.size
X0 = [None] * scan_onsets.size
total_log_p = 0
for scan, onset in enumerate(scan_onsets):
# Forward step
if scan == n_scan - 1:
offset = n_T
else:
offset = scan_onsets[scan + 1]
mu, mu_Gamma_inv, Gamma_inv, log_p_data, Lambda_0, \
Lambda_1, H, deltaY, deltaY_sigma2inv_rho_weightT = \
self._forward_step(Y[onset:offset, :],
T_X, Var_X, Var_dX, rho_e, sigma2_e,
weight)
total_log_p += log_p_data
# Backward step
mu_hat, mu_Gamma_inv_hat, Gamma_inv_hat \
= self._backward_step(
deltaY, deltaY_sigma2inv_rho_weightT, sigma2_e,
weight, mu, mu_Gamma_inv, Gamma_inv,
Lambda_0, Lambda_1, H)
X[scan] = np.concatenate(
[mu_t[None, :n_C] for mu_t in mu_hat])
X0[scan] = np.concatenate(
[mu_t[None, n_C:] for mu_t in mu_hat])
X = np.concatenate(X)
X0 = np.concatenate(X0)
return X, X0, total_log_p
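# Hedged usage sketch (placeholder names; the arguments come from the
# quantities estimated during fitting, as listed in the signature above):
#   X_new, X0_new, logp = self._transform(
#       Y_new, scan_onsets_new, beta, beta0,
#       rho_e, sigma_e, rho_X, sigma2_X, rho_X0, sigma2_X0)
#   # X_new has one column per task condition, X0_new one column per
#   # nuisance regressor; logp is the total log likelihood of Y_new.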
def _score(self, Y, design, beta, scan_onsets, beta0, rho_e, sigma_e,
rho_X0, sigma2_X0):
""" Given the data Y, and the spatial pattern beta0
of nuisance time series, return the cross-validated score
of the data Y given all parameters of the subject estimated
during the first step.
It is assumed that the user has design matrix built for the
data Y. Both beta and beta0 are posterior expectation estimated
from training data with the estimated covariance matrix U and
SNR serving as prior. We marginalize X0 instead of fitting
it in this function because this function is for the purpose
of evaluating the model on new data. We should avoid doing any
additional fitting when performing cross-validation.
The hypothetic response to the task will be subtracted, and
the unknown nuisance activity which contributes to the data
through beta0 will be marginalized.
"""
logger.info('Estimating cross-validated score for new data.')
n_T = Y.shape[0]
if design is not None:
Y = Y - np.dot(design, beta)
# The function works for both full model and null model.
# If design matrix is not provided, the whole data is
# used as input for _forward_step. If design matrix is provided,
# residual after subtracting design * beta is fed to _forward_step
T_X = np.diag(rho_X0)
Var_X = sigma2_X0 / (1 - rho_X0**2)
Var_dX = sigma2_X0
# Prior parameters for X0: T_X is the transition matrix, Var_X
# is the marginal variance of the first time point. Var_dX is the
# variance of the updating noise.
sigma2_e = sigma_e ** 2
# variance of voxel-specific updating noise component
scan_onsets = np.setdiff1d(scan_onsets, n_T).astype(int)
n_scan = scan_onsets.size
total_log_p = 0
for scan, onset in enumerate(scan_onsets):
# Forward step
if scan == n_scan - 1:
offset = n_T
else:
offset = scan_onsets[scan + 1]
_, _, _, log_p_data, _, _, _, _, _ = \
self._forward_step(
Y[onset:offset, :], T_X, Var_X, Var_dX, rho_e, sigma2_e,
beta0)
total_log_p += log_p_data
return total_log_p
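# Hedged usage sketch (placeholder names): the cross-validated log
# likelihood of held-out data under the full and the null model can be
# compared, e.g.,
#   ll_full = self._score(Y_new, design_new, beta, scan_onsets_new,
#                         beta0, rho_e, sigma_e, rho_X0, sigma2_X0)
#   ll_null = self._score(Y_new, None, None, scan_onsets_new,
#                         beta0_null, rho_e_null, sigma_e_null,
#                         rho_X0_null, sigma2_X0_null)
#   # A higher ll_full than ll_null indicates that the task-related
#   # patterns generalize to the new data.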
def _est_AR1(self, x, same_para=False):
""" Estimate the AR(1) parameters of input x.
Each column of x is assumed as independent from other columns,
and each column is treated as an AR(1) process.
If same_para is set as True, then all columns of x
are concatenated and a single set of AR(1) parameters
is estimated. Strictly speaking, the breaking points
between consecutive concatenated columns should be considered.
But for long time series, this is ignored.
"""
if same_para:
n_c = x.shape[1]
x = np.reshape(x, x.size, order='F')
rho, sigma2 = alg.AR_est_YW(x, 1)
# We concatenate all the columns of the design matrix to estimate
# common AR(1)
# parameters. This creates some bias because the end of one column
# and the beginning of the next column of the design matrix are
# treated as consecutive samples.
rho = np.ones(n_c) * rho
sigma2 = np.ones(n_c) * sigma2
else:
rho = np.zeros(np.shape(x)[1])
sigma2 = np.zeros(np.shape(x)[1])
for c in np.arange(np.shape(x)[1]):
rho[c], sigma2[c] = alg.AR_est_YW(x[:, c], 1)
return rho, sigma2
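# Illustrative sketch of the Yule-Walker AR(1) estimate used above
# (hypothetical simulated series; alg refers to nitime's algorithms module,
# already imported elsewhere in this file):
#   import numpy as np
#   rng = np.random.RandomState(0)
#   noise = rng.randn(1000)
#   x = np.zeros(1000)
#   for t in range(1, 1000):
#       x[t] = 0.6 * x[t - 1] + noise[t]
#   rho_hat, sigma2_hat = alg.AR_est_YW(x, 1)
#   # rho_hat should be close to 0.6 and sigma2_hat close to 1.0.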
def _forward_step(self, Y, T_X, Var_X, Var_dX, rho_e, sigma2_e, weight):
""" forward step for HMM, assuming both the hidden state and noise
have 1-step dependence on the previous value.
"""
# We currently only implement diagonal form
# of covariance matrix for Var_X, Var_dX and T_X, which means
# each dimension of X is independent and their innovation noise
# are also independent. Note that log_p_data takes this assumption.
if Var_X.ndim == 1:
inv_Var_X = np.diag(1 / Var_X)
half_log_det_Var_X = np.sum(np.log(Var_X)) / 2.0
Var_X = np.diag(Var_X)
# the marginal variance of X
else:
half_log_det_Var_X = self._half_log_det(Var_X)
inv_Var_X = np.linalg.inv(Var_X)
if Var_dX.ndim == 1:
inv_Var_dX = np.diag(1 / Var_dX)
half_log_det_Var_dX = np.sum(np.log(Var_dX)) / 2.0
Var_dX = np.diag(Var_dX)
# the marginal variance of Delta X (the change of X from
# previous time point)
else:
inv_Var_dX = np.linalg.inv(Var_dX)
half_log_det_Var_dX = self._half_log_det(Var_dX)
if T_X.ndim == 1:
T_X = np.diag(T_X)
# Transition matrix of X: the expected mean of X at time point t+1
# is T_X * X_t
[n_T, n_V] = np.shape(Y)
# numbers of time points and voxels
mu = [None] * n_T
# posterior mean of X, conditioned on all data up till the current
# time point
Gamma_inv = [None] * n_T
# inverse of the posterior Gamma.
mu_Gamma_inv = [None] * n_T
# mu * inv(Gamma)
log_p_data = - np.log(np.pi * 2) * (n_T * n_V) / 2 \
- half_log_det_Var_X - np.sum(np.log(sigma2_e)) * n_T / 2.0\
+ np.sum(np.log(1 - rho_e**2)) / 2.0 - half_log_det_Var_dX \
* (n_T - 1)
# This is the term to be incremented by c_n at each time step.
# We first add all the fixed terms to it.
# The following are a few fixed terms.
Lambda_0 = np.dot(T_X, np.dot(inv_Var_dX, T_X.T)) \
+ np.dot(weight * rho_e**2 / sigma2_e, weight.T)
H = np.dot(inv_Var_dX, T_X.T) + np.dot(weight * rho_e / sigma2_e,
weight.T)
Lambda_1 = inv_Var_dX + np.dot(weight / sigma2_e, weight.T)
Gamma_inv[0] = inv_Var_X + np.dot(
weight * (1 - rho_e**2) / sigma2_e, weight.T)
# We might not need this and only use linalg.solve for related terms.
mu_Gamma_inv[0] = np.dot(
Y[0, :] * (1 - rho_e**2) / sigma2_e, weight.T)
mu[0] = np.linalg.solve(Gamma_inv[0], mu_Gamma_inv[0])
log_p_data -= 0.5 * np.sum(Y[0, :]**2 * (1 - rho_e**2) / sigma2_e)
# This is the term added for the first time point.
deltaY = Y[1:, :] - rho_e * Y[:-1, :]
deltaY_sigma2inv_rho_weightT = np.dot(
deltaY / sigma2_e * rho_e, weight.T)
for t in np.arange(1, n_T):
Gamma_tilde_inv = Lambda_0 + Gamma_inv[t - 1]
tmp = np.linalg.solve(Gamma_tilde_inv, H.T)
Gamma_inv[t] = Lambda_1 - np.dot(H, tmp)
mu_Gamma_inv[t] = np.dot(deltaY[t - 1, :] / sigma2_e, weight.T) \
+ np.dot(mu_Gamma_inv[t - 1]
- deltaY_sigma2inv_rho_weightT[t - 1, :], tmp)
mu[t] = np.linalg.solve(Gamma_inv[t], mu_Gamma_inv[t])
tmp2 = mu_Gamma_inv[t - 1] - deltaY_sigma2inv_rho_weightT[t - 1, :]
log_p_data += -self._half_log_det(Gamma_tilde_inv) \
+ np.dot(tmp2, np.linalg.solve(Gamma_tilde_inv, tmp2)) / 2.0
log_p_data += -self._half_log_det(Gamma_inv[-1]) \
+ np.dot(mu_Gamma_inv[-1], mu[-1]) / 2.0 \
- np.sum(deltaY**2 / sigma2_e) / 2.0
return mu, mu_Gamma_inv, Gamma_inv, log_p_data, Lambda_0, \
Lambda_1, H, deltaY, deltaY_sigma2inv_rho_weightT
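# Hedged note: _forward_step is called once per scan by _transform and
# _score (see their loops above), e.g.
#   mu, mu_Gamma_inv, Gamma_inv, log_p_data, Lambda_0, Lambda_1, H, \
#       deltaY, deltaY_sigma2inv_rho_weightT = self._forward_step(
#           Y[onset:offset, :], T_X, Var_X, Var_dX, rho_e, sigma2_e, weight)
# log_p_data accumulates the data log likelihood of that scan; the other
# outputs are the per-time-point filtered posteriors and the fixed
# quadratic terms reused by _backward_step.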
def _backward_step(self, deltaY, deltaY_sigma2inv_rho_weightT,
sigma2_e, weight, mu, mu_Gamma_inv, Gamma_inv,
Lambda_0, Lambda_1, H):
""" backward step for HMM, assuming both the hidden state and noise
have 1-step dependence on the previous value.
"""
n_T = len(Gamma_inv)
# All the terms with hat before are parameters of posterior
# distributions of X conditioned on data from all time points,
# whereas the ones without hat calculated by _forward_step
# are mean and covariance of posterior of X conditioned on
# data up to the time point.
Gamma_inv_hat = [None] * n_T
mu_Gamma_inv_hat = [None] * n_T
mu_hat = [None] * n_T
mu_hat[-1] = mu[-1].copy()
mu_Gamma_inv_hat[-1] = mu_Gamma_inv[-1].copy()
Gamma_inv_hat[-1] = Gamma_inv[-1].copy()
for t in np.arange(n_T - 2, -1, -1):
tmp = np.linalg.solve(Gamma_inv_hat[t + 1] - Gamma_inv[t + 1]
+ Lambda_1, H)
Gamma_inv_hat[t] = Gamma_inv[t] + Lambda_0 - np.dot(H.T, tmp)
mu_Gamma_inv_hat[t] = mu_Gamma_inv[t] \
- deltaY_sigma2inv_rho_weightT[t, :] + np.dot(
mu_Gamma_inv_hat[t + 1] - mu_Gamma_inv[t + 1]
+ np.dot(deltaY[t, :] / sigma2_e, weight.T), tmp)
mu_hat[t] = np.linalg.solve(Gamma_inv_hat[t],
mu_Gamma_inv_hat[t])
return mu_hat, mu_Gamma_inv_hat, Gamma_inv_hat
def _initial_fit_singpara(self, XTX, XTDX, XTFX,
YTY_diag, YTDY_diag, YTFY_diag,
XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
X, Y, X0, idx_param_sing, l_idx,
n_C, n_T, n_V, n_l, n_run, n_X0, rank):
""" Perform initial fitting of a simplified model, which assumes
that all voxels share exactly the same temporal covariance
matrix for their noise (the same noise variance and
auto-correlation). The SNR is implicitly assumed to be 1
for all voxels.
"""
logger.info('Initial fitting assuming single parameter of '
'noise for all voxels')
X_joint = np.concatenate((X0, X), axis=1)
beta_hat = np.linalg.lstsq(X_joint, Y, rcond=None)[0]
residual = Y - np.dot(X_joint, beta_hat)
# point estimates of betas and fitting residuals without assuming
# the Bayesian model underlying RSA.
# There are several possible ways of initializing the covariance.
# (1) start from the point estimation of covariance
cov_point_est = np.cov(beta_hat[n_X0:, :]) / np.var(residual)
current_vec_U_chlsk_l = \
np.linalg.cholesky((cov_point_est + np.eye(n_C)) / 2)[l_idx]
# We use the average of covariance of point estimation and an identity
# matrix as the initial value of the covariance matrix, just in case
# the user provides data in which n_V is smaller than n_C.
# (2) start from identity matrix
# current_vec_U_chlsk_l = np.eye(n_C)[l_idx]
# (3) random initialization
# current_vec_U_chlsk_l = self.random_state_.randn(n_l)
# vectorized version of L, Cholesky factor of U, the shared
# covariance matrix of betas across voxels.
rho1 = np.sum(
residual[0:-1, :] * residual[1:, :], axis=0) / \
np.sum(residual[0:-1, :] * residual[0:-1, :], axis=0)
# Estimate of auto correlation assuming data includes pure noise.
log_sigma2 = np.log(np.var(
residual[1:, :] - residual[0:-1, :] * rho1, axis=0))
# log of estimates of the variance of the "innovation" noise
# of AR(1) process at each time point.
param0 = np.empty(np.sum(np.fromiter(
(np.size(v) for v in idx_param_sing.values()), int)))
# Initial parameter
# Then we fill each part of the original guess of parameters
param0[idx_param_sing['Cholesky']] = current_vec_U_chlsk_l
param0[idx_param_sing['a1']] = np.median(np.tan(rho1 * np.pi / 2))
# Fit it.
res = scipy.optimize.minimize(
self._loglike_AR1_singpara, param0,
args=(XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
l_idx, n_C, n_T, n_V, n_run, n_X0,
idx_param_sing, rank),
method=self.optimizer, jac=True, tol=self.tol,
options={'disp': self.minimize_options['disp'],
'maxiter': 100})
current_vec_U_chlsk_l = res.x[idx_param_sing['Cholesky']]
current_a1 = res.x[idx_param_sing['a1']] * np.ones(n_V)
# log(sigma^2) assuming the data include no signal is returned,
# as a starting point for the iteration in the next step.
# Although it should overestimate the variance,
# setting it this way might allow it to track log(sigma^2)
# more closely for each voxel.
return current_vec_U_chlsk_l, current_a1, log_sigma2
def _fit_diagV_noGP(
self, XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X, Y, X_base, X_res, D, F, run_TRs,
current_vec_U_chlsk_l,
current_a1, current_logSNR2,
idx_param_fitU, idx_param_fitV,
l_idx, n_C, n_T, n_V, n_l, n_run, n_X0, rank):
""" (optional) second step of fitting, full model but without
GP prior on log(SNR). This step is only done if GP prior
is requested.
"""
init_iter = self.init_iter
logger.info('second fitting without GP prior'
' for {} times'.format(init_iter))
# Initial parameters
param0_fitU = np.empty(np.sum(np.fromiter(
(np.size(v) for v in idx_param_fitU.values()), int)))
param0_fitV = np.empty(np.size(idx_param_fitV['log_SNR2']))
# We cannot use the same logic as the line above because
# idx_param_fitV also includes entries for GP parameters.
param0_fitU[idx_param_fitU['Cholesky']] = \
current_vec_U_chlsk_l.copy()
param0_fitU[idx_param_fitU['a1']] = current_a1.copy()
param0_fitV[idx_param_fitV['log_SNR2']] = \
current_logSNR2[:-1].copy()
L = np.zeros((n_C, rank))
tol = self.tol * 5
for it in range(0, init_iter):
X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, _ \
= self._prepare_data_XYX0(
X, Y, X_base, X_res, D, F, run_TRs, no_DC=True)
# fit U, the covariance matrix, together with AR(1) param
param0_fitU[idx_param_fitU['Cholesky']] = \
current_vec_U_chlsk_l \
+ self.random_state_.randn(n_l) \
* np.linalg.norm(current_vec_U_chlsk_l) \
/ n_l**0.5 * np.exp(-it / init_iter * self.anneal_speed - 1)
param0_fitU[idx_param_fitU['a1']] = current_a1
res_fitU = scipy.optimize.minimize(
self._loglike_AR1_diagV_fitU, param0_fitU,
args=(XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
current_logSNR2, l_idx, n_C,
n_T, n_V, n_run, n_X0, idx_param_fitU, rank),
method=self.optimizer, jac=True, tol=tol,
options=self.minimize_options)
current_vec_U_chlsk_l = \
res_fitU.x[idx_param_fitU['Cholesky']]
current_a1 = res_fitU.x[idx_param_fitU['a1']]
norm_fitUchange = np.linalg.norm(res_fitU.x - param0_fitU)
logger.debug('norm of parameter change after fitting U: '
'{}'.format(norm_fitUchange))
param0_fitU = res_fitU.x.copy()
# fit V, reflected in the log(SNR^2) of each voxel
rho1 = np.arctan(current_a1) * 2 / np.pi
L[l_idx] = current_vec_U_chlsk_l
X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
XTAcorrX, XTAcorrY, YTAcorrY, \
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL = \
self._precompute_ar1_quad_forms(XTY, XTDY, XTFY,
YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX,
X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0,
X0TY, X0TDY, X0TFY,
L, rho1, n_V, n_X0)
res_fitV = scipy.optimize.minimize(
self._loglike_AR1_diagV_fitV, param0_fitV,
args=(X0TAX0, XTAX0, X0TAY,
X0TAX0_i, XTAcorrX, XTAcorrY, YTAcorrY,
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL,
current_vec_U_chlsk_l,
current_a1, l_idx, n_C, n_T, n_V, n_run,
n_X0, idx_param_fitV, rank,
False, False),
method=self.optimizer, jac=True, tol=tol,
options=self.minimize_options)
current_logSNR2[0:n_V - 1] = res_fitV.x
current_logSNR2[-1] = - np.sum(current_logSNR2[0:n_V - 1])
norm_fitVchange = np.linalg.norm(res_fitV.x - param0_fitV)
logger.debug('norm of parameter change after fitting V: '
'{}'.format(norm_fitVchange))
logger.debug('E[log(SNR2)^2]: {}'.format(
np.mean(current_logSNR2**2)))
# The lines below are for debugging purpose.
# If any voxel's log(SNR^2) gets to non-finite number,
# something might be wrong -- could be that the data has
# nothing to do with the design matrix.
if np.any(np.logical_not(np.isfinite(current_logSNR2))):
logger.warning('Initial fitting: iteration {}'.format(it))
logger.warning('current log(SNR^2): '
'{}'.format(current_logSNR2))
logger.warning('log(sigma^2) has non-finite number')
param0_fitV = res_fitV.x.copy()
# Re-estimating X_res from residuals
current_SNR2 = np.exp(current_logSNR2)
if self.auto_nuisance:
LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, current_sigma2 \
= self._calc_LL(rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY,
X0TAX0, current_SNR2,
n_V, n_T, n_run, rank, n_X0)
betas = current_SNR2 * np.dot(L, YTAcorrXL_LAMBDA.T)
beta0s = np.einsum(
'ijk,ki->ji', X0TAX0_i,
(X0TAY - np.einsum('ikj,ki->ji', XTAX0, betas)))
residuals = Y - np.dot(X, betas) - np.dot(
X_base, beta0s[:np.shape(X_base)[1], :])
X_res = self.nureg_method(
self.n_nureg_).fit_transform(
self.preprocess_residual(residuals))
if norm_fitVchange / np.sqrt(param0_fitV.size) < tol \
and norm_fitUchange / np.sqrt(param0_fitU.size) \
< tol:
break
return current_vec_U_chlsk_l, current_a1, current_logSNR2, X_res
def _fit_diagV_GP(
self, XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X, Y, X_base, X_res, D, F, run_TRs,
current_vec_U_chlsk_l,
current_a1, current_logSNR2, current_GP, n_smooth,
idx_param_fitU, idx_param_fitV, l_idx,
n_C, n_T, n_V, n_l, n_run, n_X0, rank, GP_space, GP_inten,
dist2, inten_diff2, space_smooth_range, inten_smooth_range):
""" Last step of fitting. If GP is not requested, this step will
still be done, just without GP prior on log(SNR).
"""
tol = self.tol
n_iter = self.n_iter
logger.info('Last step of fitting.'
' for maximum {} times'.format(n_iter))
# Initial parameters
param0_fitU = np.empty(np.sum(np.fromiter(
(np.size(v) for v in idx_param_fitU.values()), int)))
param0_fitV = np.empty(np.size(idx_param_fitV['log_SNR2'])
+ np.size(idx_param_fitV['c_both']))
# We cannot use the same logic as the line above because
# idx_param_fitV also includes entries for GP parameters.
param0_fitU[idx_param_fitU['Cholesky']] = \
current_vec_U_chlsk_l.copy()
param0_fitU[idx_param_fitU['a1']] = current_a1.copy()
param0_fitV[idx_param_fitV['log_SNR2']] = \
current_logSNR2[:-1].copy()
L = np.zeros((n_C, rank))
L[l_idx] = current_vec_U_chlsk_l
if self.GP_space:
param0_fitV[idx_param_fitV['c_both']] = current_GP.copy()
for it in range(0, n_iter):
X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, _ = \
self._prepare_data_XYX0(
X, Y, X_base, X_res, D, F, run_TRs, no_DC=True)
# fit U
param0_fitU[idx_param_fitU['Cholesky']] = \
current_vec_U_chlsk_l \
+ self.random_state_.randn(n_l) \
* np.linalg.norm(current_vec_U_chlsk_l) \
/ n_l**0.5 * np.exp(-it / n_iter * self.anneal_speed - 1)
param0_fitU[idx_param_fitU['a1']] = current_a1
res_fitU = scipy.optimize.minimize(
self._loglike_AR1_diagV_fitU, param0_fitU,
args=(XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
current_logSNR2, l_idx, n_C, n_T, n_V,
n_run, n_X0, idx_param_fitU, rank),
method=self.optimizer, jac=True,
tol=tol,
options=self.minimize_options)
current_vec_U_chlsk_l = \
res_fitU.x[idx_param_fitU['Cholesky']]
current_a1 = res_fitU.x[idx_param_fitU['a1']]
L[l_idx] = current_vec_U_chlsk_l
fitUchange = res_fitU.x - param0_fitU
norm_fitUchange = np.linalg.norm(fitUchange)
logger.debug('norm of parameter change after fitting U: '
'{}'.format(norm_fitUchange))
param0_fitU = res_fitU.x.copy()
# fit V
rho1 = np.arctan(current_a1) * 2 / np.pi
X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
XTAcorrX, XTAcorrY, YTAcorrY, \
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL = \
self._precompute_ar1_quad_forms(XTY, XTDY, XTFY,
YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX,
X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0,
X0TY, X0TDY, X0TFY,
L, rho1, n_V, n_X0)
res_fitV = scipy.optimize.minimize(
self._loglike_AR1_diagV_fitV, param0_fitV, args=(
X0TAX0, XTAX0, X0TAY, X0TAX0_i,
XTAcorrX, XTAcorrY, YTAcorrY,
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL,
current_vec_U_chlsk_l, current_a1,
l_idx, n_C, n_T, n_V, n_run, n_X0,
idx_param_fitV, rank,
GP_space, GP_inten, dist2, inten_diff2,
space_smooth_range, inten_smooth_range),
method=self.optimizer, jac=True,
tol=tol,
options=self.minimize_options)
current_logSNR2[0:n_V - 1] = \
res_fitV.x[idx_param_fitV['log_SNR2']]
current_logSNR2[n_V - 1] = -np.sum(current_logSNR2[0:n_V - 1])
current_GP = res_fitV.x[idx_param_fitV['c_both']]
fitVchange = res_fitV.x - param0_fitV
norm_fitVchange = np.linalg.norm(fitVchange)
param0_fitV = res_fitV.x.copy()
logger.debug('norm of parameter change after fitting V: '
'{}'.format(norm_fitVchange))
logger.debug('E[log(SNR2)^2]: {}'.format(
np.mean(current_logSNR2**2)))
# Re-estimating X_res from residuals
current_SNR2 = np.exp(current_logSNR2)
if self.auto_nuisance:
LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, current_sigma2 \
= self._calc_LL(rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY,
X0TAX0, current_SNR2,
n_V, n_T, n_run, rank, n_X0)
betas = current_SNR2 \
* np.dot(L, YTAcorrXL_LAMBDA.T)
beta0s = np.einsum(
'ijk,ki->ji', X0TAX0_i,
(X0TAY - np.einsum('ikj,ki->ji', XTAX0, betas)))
residuals = Y - np.dot(X, betas) - np.dot(
X_base, beta0s[:np.shape(X_base)[1], :])
X_res = self.nureg_method(self.n_nureg_).fit_transform(
self.preprocess_residual(residuals))
if GP_space:
logger.debug('current GP[0]: {}'.format(current_GP[0]))
logger.debug('gradient for GP[0]: {}'.format(
res_fitV.jac[idx_param_fitV['c_space']]))
if GP_inten:
logger.debug('current GP[1]: {}'.format(current_GP[1]))
logger.debug('gradient for GP[1]: {}'.format(
res_fitV.jac[idx_param_fitV['c_inten']]))
if np.max(np.abs(fitVchange)) < tol and \
np.max(np.abs(fitUchange)) < tol:
break
return current_vec_U_chlsk_l, current_a1, current_logSNR2,\
current_GP, X_res
def _fit_null(self, Y, X_base, scan_onsets=None):
""" Fit a null model.
"""
n_V = np.size(Y, axis=1)
n_T = np.size(Y, axis=0)
t_start = time.time()
D, F, run_TRs, n_run = self._prepare_DF(
n_T, scan_onsets=scan_onsets)
YTY_diag = np.sum(Y * Y, axis=0)
YTDY_diag = np.sum(Y * np.dot(D, Y), axis=0)
YTFY_diag = np.sum(Y * np.dot(F, Y), axis=0)
tol = self.tol
n_iter = self.n_iter
logger.info('Fitting null model'
' for maximum {} times'.format(n_iter))
# Add DC components capturing run-specific baselines.
X_DC = self._gen_X_DC(run_TRs)
X_DC, X_base, idx_DC = self._merge_DC_to_base(
X_DC, X_base, no_DC=False)
X_res = None
param0 = np.zeros(n_V)
for it in range(0, n_iter):
if X_res is None:
X0 = X_base
else:
X0 = np.concatenate((X_base, X_res), axis=1)
n_X0 = X0.shape[1]
X0TX0, X0TDX0, X0TFX0 = self._make_templates(D, F, X0, X0)
X0TY, X0TDY, X0TFY = self._make_templates(D, F, X0, Y)
res_null = scipy.optimize.minimize(
self._loglike_AR1_null, param0, args=(
YTY_diag, YTDY_diag, YTFY_diag,
X0TX0, X0TDX0, X0TFX0, X0TY, X0TDY, X0TFY,
n_T, n_V, n_run, n_X0),
method=self.optimizer, jac=True, tol=tol,
options=self.minimize_options)
param_change = res_null.x - param0
param0 = res_null.x.copy()
est_rho1_AR1_null = 2.0 / np.pi * np.arctan(param0)
if self.auto_nuisance:
X0TAX0 = X0TX0[None, :, :] \
- est_rho1_AR1_null[:, None, None] \
* X0TDX0[None, :, :] \
+ est_rho1_AR1_null[:, None, None]**2 \
* X0TFX0[None, :, :]
# dimension: space*#baseline*#baseline
X0TAY = self._make_ar1_quad_form(X0TY, X0TDY, X0TFY,
est_rho1_AR1_null)
# dimension: #baseline*space
beta0s = np.linalg.solve(X0TAX0, X0TAY.T).T
residuals = Y - np.dot(X_base, beta0s[:np.shape(X_base)[1], :])
X_res = self.nureg_method(self.n_nureg_).fit_transform(
self.preprocess_residual(residuals))
if np.max(np.abs(param_change)) < self.tol:
logger.info('The change of parameters is smaller than '
'the tolerance value {}. Fitting is finished '
'after {} iterations'.format(self.tol, it + 1))
break
X0TAX0 = X0TX0[None, :, :] \
- est_rho1_AR1_null[:, None, None] \
* X0TDX0[None, :, :] \
+ est_rho1_AR1_null[:, None, None]**2 \
* X0TFX0[None, :, :]
# dimension: space*#baseline*#baseline
X0TAY = self._make_ar1_quad_form(X0TY, X0TDY, X0TFY,
est_rho1_AR1_null)
# dimension: #baseline*space
est_beta0_AR1_null = np.linalg.solve(X0TAX0, X0TAY.T).T
YTAY = self._make_ar1_quad_form(YTY_diag, YTDY_diag, YTFY_diag,
est_rho1_AR1_null)
# dimension: space,
YTAcorrY = YTAY - np.sum(X0TAY * est_beta0_AR1_null, axis=0)
# dimension: space,
est_sigma_AR1_null = (YTAcorrY / (n_T - n_X0)) ** 0.5
if idx_DC.size > 1:
collapsed_DC = np.sum(X0[:, idx_DC], axis=1)
X0 = np.insert(np.delete(X0, idx_DC, axis=1), 0,
collapsed_DC, axis=1)
collapsed_beta0 = np.mean(est_beta0_AR1_null[idx_DC, :], axis=0)
est_beta0_AR1_null = np.insert(
np.delete(est_beta0_AR1_null, idx_DC, axis=0),
0, collapsed_beta0, axis=0)
t_finish = time.time()
logger.info(
'total time of fitting: {} seconds'.format(t_finish - t_start))
return est_beta0_AR1_null, est_sigma_AR1_null, est_rho1_AR1_null, X0
# We fit two parts of the parameters iteratively.
# The following are the corresponding negative log likelihood functions.
def _loglike_AR1_diagV_fitU(self, param, XTX, XTDX, XTFX, YTY_diag,
YTDY_diag, YTFY_diag, XTY, XTDY, XTFY,
X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
log_SNR2, l_idx, n_C, n_T, n_V, n_run, n_X0,
idx_param_fitU, rank):
# This function calculates the log likelihood of data given cholesky
# decomposition of U and AR(1) parameters of noise as free parameters.
# Free parameters are in param.
# The log of the square of signal to noise level in each voxel
# (the ratio of the diagonal elements in V and
# the noise variance) are fixed. This likelihood is iteratively
# optimized with the one with suffix _fitV.
#
# The meaning of U and V follows this wiki page of the matrix normal
# distribution:
# https://en.wikipedia.org/wiki/Matrix_normal_distribution
#
# We assume that the betas of all voxels, arranged as a matrix, follow
# this distribution. U describes the covariance between conditions.
# V describes the covariance between voxels.
#
# In this version, we assume that beta is independent between voxels
# and noise is also independent.
# By the assumption that noise is independent, we only need to pass
# the products X'X, X'Y and Y'Y, instead of X and Y
# Y'Y is passed in the form of its diagonal elements.
# DiagV means we assume that the variance of beta can be different
# between voxels. This means that V is a diagonal matrix instead of
# an identity matrix. The parameter includes the lower triangular
# part of the cholesky decomposition
# of U (flattened), then tan(rho1*pi/2) where rho1 is
# each voxel's autoregressive coefficient (assuming an AR(1) model).
# Such parametrization avoids the need of boundaries
# for parameters.
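# Minimal sketch of this unconstrained parametrization (hypothetical values):
#   import numpy as np
#   a1 = np.array([-10., 0., 10.])
#   rho1 = 2.0 / np.pi * np.arctan(a1)   # approx [-0.94, 0.00, 0.94]
#   # rho1 is guaranteed to lie in (-1, 1) for any real a1, so the
#   # optimizer can work on a1 without bound constraints.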
L = np.zeros([n_C, rank])
# lower triangular matrix L, Cholesky decomposition of U
L[l_idx] = param[idx_param_fitU['Cholesky']]
a1 = param[idx_param_fitU['a1']]
rho1 = 2.0 / np.pi * np.arctan(a1) # auto-regressive coefficients
SNR2 = np.exp(log_SNR2)
# each element of SNR2 is the ratio of the diagonal element on V
# to the variance of the fresh noise in that voxel
X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
XTAcorrX, XTAcorrY, YTAcorrY, \
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL = \
self._precompute_ar1_quad_forms(XTY, XTDY, XTFY,
YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X0TX0, X0TDX0,
X0TFX0, XTX0, XTDX0, XTFX0,
X0TY, X0TDY, X0TFY,
L, rho1, n_V, n_X0)
# Only starting from this point, SNR2 is involved
LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, sigma2 \
= self._calc_LL(rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY,
X0TAX0, SNR2, n_V, n_T, n_run, rank, n_X0)
if not np.isfinite(LL):
logger.warning('NaN detected!')
logger.warning('LL: {}'.format(LL))
logger.warning('sigma2: {}'.format(sigma2))
logger.warning('YTAcorrY: {}'.format(YTAcorrY))
logger.warning('LTXTAcorrY: {}'.format(LTXTAcorrY))
logger.warning('YTAcorrXL_LAMBDA: {}'.format(YTAcorrXL_LAMBDA))
logger.warning('SNR2: {}'.format(SNR2))
YTAcorrXL_LAMBDA_LT = np.dot(YTAcorrXL_LAMBDA, L.T)
# dimension: space*feature (feature can be larger than rank)
deriv_L = -np.einsum('ijk,ikl,i', XTAcorrXL, LAMBDA, SNR2) \
- np.dot(np.einsum('ijk,ik->ji', XTAcorrXL, YTAcorrXL_LAMBDA)
* SNR2**2 / sigma2, YTAcorrXL_LAMBDA) \
+ np.dot(XTAcorrY / sigma2 * SNR2, YTAcorrXL_LAMBDA)
# dimension: feature*rank
# The following are for calculating the derivative to a1
deriv_a1 = np.empty(n_V)
dXTAX_drho1 = -XTDX + 2 * rho1[:, None, None] * XTFX
# dimension: space*feature*feature
dXTAY_drho1 = self._make_ar1_quad_form_grad(XTDY, XTFY, rho1)
# dimension: feature*space
dYTAY_drho1 = self._make_ar1_quad_form_grad(YTDY_diag, YTFY_diag, rho1)
# dimension: space,
dX0TAX0_drho1 = - X0TDX0 \
+ 2 * rho1[:, None, None] * X0TFX0
# dimension: space*rank*rank
dXTAX0_drho1 = - XTDX0 \
+ 2 * rho1[:, None, None] * XTFX0
# dimension: space*feature*rank
dX0TAY_drho1 = self._make_ar1_quad_form_grad(X0TDY, X0TFY, rho1)
# dimension: rank*space
# The following are executed for each voxel.
for i_v in range(n_V):
# All variables with _ele as suffix are for data of just one voxel
invX0TAX0_X0TAX_ele = np.dot(X0TAX0_i[i_v, :, :],
XTAX0[i_v, :, :].T)
invX0TAX0_X0TAY_ele = np.dot(X0TAX0_i[i_v, :, :], X0TAY[:, i_v])
dXTAX0_drho1_invX0TAX0_X0TAX_ele = np.dot(dXTAX0_drho1[i_v, :, :],
invX0TAX0_X0TAX_ele)
# preparation for the variable below
dXTAcorrX_drho1_ele = dXTAX_drho1[i_v, :, :] \
- dXTAX0_drho1_invX0TAX0_X0TAX_ele \
- dXTAX0_drho1_invX0TAX0_X0TAX_ele.T \
+ np.dot(np.dot(invX0TAX0_X0TAX_ele.T,
dX0TAX0_drho1[i_v, :, :]),
invX0TAX0_X0TAX_ele)
dXTAcorrY_drho1_ele = dXTAY_drho1[:, i_v] \
- np.dot(invX0TAX0_X0TAX_ele.T, dX0TAY_drho1[:, i_v]) \
- np.dot(dXTAX0_drho1[i_v, :, :], invX0TAX0_X0TAY_ele) \
+ np.dot(np.dot(invX0TAX0_X0TAX_ele.T,
dX0TAX0_drho1[i_v, :, :]),
invX0TAX0_X0TAY_ele)
dYTAcorrY_drho1_ele = dYTAY_drho1[i_v] \
- np.dot(dX0TAY_drho1[:, i_v], invX0TAX0_X0TAY_ele) * 2\
+ np.dot(np.dot(invX0TAX0_X0TAY_ele, dX0TAX0_drho1[i_v, :, :]),
invX0TAX0_X0TAY_ele)
deriv_a1[i_v] = 2 / np.pi / (1 + a1[i_v]**2) \
* (- n_run * rho1[i_v] / (1 - rho1[i_v]**2)
- np.einsum('ij,ij', X0TAX0_i[i_v, :, :],
dX0TAX0_drho1[i_v, :, :]) * 0.5
- np.einsum('ij,ij', LAMBDA[i_v, :, :],
np.dot(np.dot(
L.T, dXTAcorrX_drho1_ele), L))
* (SNR2[i_v] * 0.5)
- dYTAcorrY_drho1_ele * 0.5 / sigma2[i_v]
+ SNR2[i_v] / sigma2[i_v]
* np.dot(dXTAcorrY_drho1_ele,
YTAcorrXL_LAMBDA_LT[i_v, :])
- (0.5 * SNR2[i_v]**2 / sigma2[i_v])
* np.dot(np.dot(YTAcorrXL_LAMBDA_LT[i_v, :],
dXTAcorrX_drho1_ele),
YTAcorrXL_LAMBDA_LT[i_v, :]))
deriv = np.empty(np.size(param))
deriv[idx_param_fitU['Cholesky']] = deriv_L[l_idx]
deriv[idx_param_fitU['a1']] = deriv_a1
return -LL, -deriv
def _loglike_AR1_diagV_fitV(self, param,
X0TAX0, XTAX0, X0TAY, X0TAX0_i,
XTAcorrX, XTAcorrY, YTAcorrY,
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL,
L_l, a1, l_idx, n_C, n_T, n_V, n_run,
n_X0, idx_param_fitV, rank=None,
GP_space=False, GP_inten=False,
dist2=None, inten_dist2=None,
space_smooth_range=None,
inten_smooth_range=None):
# This function calculates the log likelihood of data given
# the log of the square of pseudo signal to noise ratio in each voxel.
# The free parameter log(SNR^2) is in param
# This likelihood is iteratively optimized with the one with _fitU.
# The cholesky factor of U and autoregressive coefficient
# in temporal AR(1) model for noise are fixed.
# Because the ML estimate of the variance of noise in each voxel
# (sigma^2) given other parameters has analytic form,
# we do not need to explicitly parametrize it.
# Just set it to the ML value.
#
# L_l is the lower triangular part of L, a1 is tan(rho1*pi/2),
# where rho1 is the autoregressive coefficient in each voxel
# We can optionally include a Gaussian Process prior on log(SNR).
# This term is not included in _fitU, because log(SNR)
# are fixed in _fitU.
# GP_space and GP_inten are Boolean, indicating whether we want to
# include GP kernels either on voxel coordinates or intensity.
# dist2 and inten_dist2 are the squares of spatial distances and
# intensity differences ([n_voxel x n_voxel]). space_smooth_range
# and inten_smooth_range are the ranges we believe the GP length
# scales should reside in. They are used in an additional half-Cauchy
# prior to constrain these length scales.
n_l = np.size(l_idx[0])
# the number of parameters in the index of lower-triangular matrix
if rank is None:
rank = int((2 * n_C + 1 -
np.sqrt(n_C**2 * 4 + n_C * 4 + 1 - 8 * n_l)) / 2)
L = np.zeros([n_C, rank])
L[l_idx] = L_l
log_SNR2 = np.empty(n_V)
log_SNR2[0:n_V - 1] = param[idx_param_fitV['log_SNR2']]
log_SNR2[-1] = -np.sum(log_SNR2[0:n_V - 1])
# This follows the restriction that the SNRs have a geometric mean
# of 1. That is why they are called pseudo-SNR. This restriction
# is imposed because SNR and L are determined only up to a scale
# Be cautious that during simulation, when there is absolutely
# no signal in the data, sometimes the fitting diverges,
# presumably because we have created correlation between log_SNR2
# values due to the constraint. But I have not reproduced this often.
SNR2 = np.exp(log_SNR2)
# If requested, a GP prior is imposed on log(SNR).
rho1 = 2.0 / np.pi * np.arctan(a1)
# AR(1) coefficient, dimension: space
LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, sigma2 \
= self._calc_LL(rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY, X0TAX0,
SNR2, n_V, n_T, n_run, rank, n_X0)
# Log likelihood of data given parameters, without the GP prior.
deriv_log_SNR2 = (-rank + np.trace(LAMBDA, axis1=1, axis2=2)) * 0.5\
+ np.sum(YTAcorrXL_LAMBDA**2, axis=1) * SNR2 / sigma2 / 2
# Partial derivative of log likelihood over log(SNR^2)
# dimension: space,
# The second term above is due to the equation for calculating
# sigma2
if GP_space:
# Imposing GP prior on log(SNR) at least over
# spatial coordinates
c_space = param[idx_param_fitV['c_space']]
l2_space = np.exp(c_space)
# The square of the length scale of the GP kernel defined on
# the spatial coordinates of voxels
dl2_dc_space = l2_space
# partial derivative of l^2 with respect to c_space
if GP_inten:
c_inten = param[idx_param_fitV['c_inten']]
l2_inten = np.exp(c_inten)
# The square of the length scale of the GP kernel defined
# on the image intensity of voxels
dl2_dc_inten = l2_inten
# partial derivative of l^2 with respect to c_inten
K_major = np.exp(- (dist2 / l2_space
+ inten_dist2 / l2_inten)
/ 2.0)
else:
K_major = np.exp(- dist2 / l2_space / 2.0)
# The kernel defined over the spatial coordinates of voxels.
# This is a template: the diagonal values are all 1, meaning
# the variance of log(SNR) has not been multiplied
K_tilde = K_major + np.diag(np.ones(n_V) * self.eta)
# We add a small number to the diagonal to make sure the matrix
# is invertible.
# Note that the K_tilde here is still a template:
# It is the correct K divided by the variance tau^2
# So it does not depend on the variance of the GP.
L_K_tilde = np.linalg.cholesky(K_tilde)
inv_L_K_tilde = np.linalg.solve(L_K_tilde, np.identity(n_V))
inv_K_tilde = np.dot(inv_L_K_tilde.T, inv_L_K_tilde)
log_det_K_tilde = np.sum(np.log(np.diag(L_K_tilde)**2))
invK_tilde_log_SNR = np.dot(inv_K_tilde, log_SNR2) / 2
log_SNR_invK_tilde_log_SNR = np.dot(log_SNR2,
invK_tilde_log_SNR) / 2
# MAP estimate of the variance of the Gaussian Process given
# other parameters.
tau2, log_ptau = self.tau2_prior(log_SNR_invK_tilde_log_SNR, n_V,
self.tau_range)
# log_ptau is log(p(tau)) given the form of prior for tau
LL += log_ptau
# GP prior terms added to the log likelihood
LL = LL - log_det_K_tilde / 2.0 - n_V / 2.0 * np.log(tau2) \
- np.log(2 * np.pi) * n_V / 2.0 \
- log_SNR_invK_tilde_log_SNR / tau2 / 2
deriv_log_SNR2 -= invK_tilde_log_SNR / tau2 / 2.0
# Note that the derivative to log(SNR) is
# invK_tilde_log_SNR / tau2, but we are calculating the
# derivative to log(SNR^2)
dK_tilde_dl2_space = dist2 * (K_major) / 2.0 \
/ l2_space**2
deriv_c_space = \
(np.dot(np.dot(invK_tilde_log_SNR, dK_tilde_dl2_space),
invK_tilde_log_SNR) / tau2 / 2.0
- np.sum(inv_K_tilde * dK_tilde_dl2_space) / 2.0)\
* dl2_dc_space
# Prior on the length scales
LL += scipy.stats.halfcauchy.logpdf(
l2_space**0.5, scale=space_smooth_range)
deriv_c_space -= 1 / (l2_space + space_smooth_range**2)\
* dl2_dc_space
if GP_inten:
dK_tilde_dl2_inten = inten_dist2 * K_major \
/ 2.0 / l2_inten**2
deriv_c_inten = \
(np.dot(np.dot(invK_tilde_log_SNR, dK_tilde_dl2_inten),
invK_tilde_log_SNR) / tau2 / 2.0
- np.sum(inv_K_tilde * dK_tilde_dl2_inten) / 2.0)\
* dl2_dc_inten
# Prior on the length scale
LL += scipy.stats.halfcauchy.logpdf(
l2_inten**0.5, scale=inten_smooth_range)
deriv_c_inten -= 1 / (l2_inten + inten_smooth_range**2)\
* dl2_dc_inten
else:
LL += np.sum(scipy.stats.norm.logpdf(log_SNR2 / 2.0,
scale=self.tau_range))
# If GP prior is not requested, we still want to regularize on
# the magnitude of log(SNR).
deriv_log_SNR2 += - log_SNR2 / self.tau_range**2 / 4.0
deriv = np.empty(np.size(param))
deriv[idx_param_fitV['log_SNR2']] = \
deriv_log_SNR2[0:n_V - 1] - deriv_log_SNR2[n_V - 1]
if GP_space:
deriv[idx_param_fitV['c_space']] = deriv_c_space
if GP_inten:
deriv[idx_param_fitV['c_inten']] = deriv_c_inten
return -LL, -deriv
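# Illustrative sketch of the GP prior pieces used above (hypothetical
# small inputs; only numpy and scipy.stats are assumed):
#   import numpy as np
#   import scipy.stats
#   dist2 = np.array([[0., 1.], [1., 0.]])
#   l2_space = 2.0                                  # squared GP length scale
#   K_major = np.exp(-dist2 / l2_space / 2.0)
#   K_tilde = K_major + np.diag(np.ones(2) * 0.0001)  # eta-like jitter
#   log_det_K_tilde = np.sum(
#       np.log(np.diag(np.linalg.cholesky(K_tilde)) ** 2))
#   log_prior_l = scipy.stats.halfcauchy.logpdf(l2_space ** 0.5, scale=1.0)
#   # The fit adds log_prior_l to the log likelihood and penalizes the
#   # kernel through log_det_K_tilde, analogously to the code above.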
def _loglike_AR1_singpara(self, param, XTX, XTDX, XTFX, YTY_diag,
YTDY_diag, YTFY_diag, XTY, XTDY, XTFY,
X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
l_idx, n_C, n_T, n_V, n_run, n_X0,
idx_param_sing, rank=None):
# In this version, we assume that beta is independent
# between voxels and noise is also independent.
# The singpara version uses a single value of sigma^2 and rho1
# for all voxels. This serves as the initial fitting to get
# an estimate of L and sigma^2 and rho1. The SNR is inherently
# assumed to be 1.
n_l = np.size(l_idx[0])
# the number of parameters in the index of lower-triangular matrix
if rank is None:
rank = int((2 * n_C + 1
- np.sqrt(n_C**2 * 4 + n_C * 4 + 1 - 8 * n_l)) / 2)
L = np.zeros([n_C, rank])
L[l_idx] = param[idx_param_sing['Cholesky']]
a1 = param[idx_param_sing['a1']]
rho1 = 2.0 / np.pi * np.arctan(a1)
XTAX = XTX - rho1 * XTDX + rho1**2 * XTFX
X0TAX0 = X0TX0 - rho1 * X0TDX0 + rho1**2 * X0TFX0
XTAX0 = XTX0 - rho1 * XTDX0 + rho1**2 * XTFX0
XTAcorrX = XTAX - np.dot(XTAX0, np.linalg.solve(X0TAX0, XTAX0.T))
XTAcorrXL = np.dot(XTAcorrX, L)
LAMBDA_i = np.dot(np.dot(L.T, XTAcorrX), L) + np.eye(rank)
XTAY = XTY - rho1 * XTDY + rho1**2 * XTFY
X0TAY = X0TY - rho1 * X0TDY + rho1**2 * X0TFY
XTAcorrY = XTAY - np.dot(XTAX0, np.linalg.solve(X0TAX0, X0TAY))
LTXTAcorrY = np.dot(L.T, XTAcorrY)
YTAY = YTY_diag - rho1 * YTDY_diag + rho1**2 * YTFY_diag
YTAcorrY = YTAY \
- np.sum(X0TAY * np.linalg.solve(X0TAX0, X0TAY), axis=0)
LAMBDA_LTXTAcorrY = np.linalg.solve(LAMBDA_i, LTXTAcorrY)
L_LAMBDA_LTXTAcorrY = np.dot(L, LAMBDA_LTXTAcorrY)
sigma2 = np.mean(YTAcorrY -
np.sum(LTXTAcorrY * LAMBDA_LTXTAcorrY, axis=0))\
/ (n_T - n_X0)
LL = n_V * (-np.log(sigma2) * (n_T - n_X0) * 0.5
+ np.log(1 - rho1**2) * n_run * 0.5
- self._half_log_det(X0TAX0)
- self._half_log_det(LAMBDA_i))
deriv_L = np.dot(XTAcorrY, LAMBDA_LTXTAcorrY.T) / sigma2 \
- np.dot(np.dot(XTAcorrXL, LAMBDA_LTXTAcorrY),
LAMBDA_LTXTAcorrY.T) / sigma2 \
- np.linalg.solve(LAMBDA_i, XTAcorrXL.T).T * n_V
# These terms are used to construct derivative to a1.
dXTAX_drho1 = - XTDX + 2 * rho1 * XTFX
dX0TAX0_drho1 = - X0TDX0 + 2 * rho1 * X0TFX0
dXTAX0_drho1 = - XTDX0 + 2 * rho1 * XTFX0
invX0TAX0_X0TAX = np.linalg.solve(X0TAX0, XTAX0.T)
dXTAX0_drho1_invX0TAX0_X0TAX = np.dot(dXTAX0_drho1, invX0TAX0_X0TAX)
dXTAcorrX_drho1 = dXTAX_drho1 - dXTAX0_drho1_invX0TAX0_X0TAX \
- dXTAX0_drho1_invX0TAX0_X0TAX.T \
+ np.dot(np.dot(invX0TAX0_X0TAX.T, dX0TAX0_drho1),
invX0TAX0_X0TAX)
dLTXTAcorrXL_drho1 = np.dot(np.dot(L.T, dXTAcorrX_drho1), L)
dYTAY_drho1 = - YTDY_diag + 2 * rho1 * YTFY_diag
dX0TAY_drho1 = - X0TDY + 2 * rho1 * X0TFY
invX0TAX0_X0TAY = np.linalg.solve(X0TAX0, X0TAY)
dYTAX0_drho1_invX0TAX0_X0TAY = np.sum(dX0TAY_drho1
* invX0TAX0_X0TAY, axis=0)
dYTAcorrY_drho1 = dYTAY_drho1 - dYTAX0_drho1_invX0TAX0_X0TAY * 2\
+ np.sum(invX0TAX0_X0TAY *
np.dot(dX0TAX0_drho1, invX0TAX0_X0TAY), axis=0)
dXTAY_drho1 = - XTDY + 2 * rho1 * XTFY
dXTAcorrY_drho1 = dXTAY_drho1 \
- np.dot(dXTAX0_drho1, invX0TAX0_X0TAY) \
- np.dot(invX0TAX0_X0TAX.T, dX0TAY_drho1) \
+ np.dot(np.dot(invX0TAX0_X0TAX.T, dX0TAX0_drho1),
invX0TAX0_X0TAY)
deriv_a1 = 2.0 / (np.pi * (1 + a1**2)) \
* (n_V * (- n_run * rho1 / (1 - rho1**2)
- 0.5 * np.trace(np.linalg.solve(
X0TAX0, dX0TAX0_drho1))
- 0.5 * np.trace(np.linalg.solve(
LAMBDA_i, dLTXTAcorrXL_drho1)))
- 0.5 * np.sum(dYTAcorrY_drho1) / sigma2
+ np.sum(dXTAcorrY_drho1 * L_LAMBDA_LTXTAcorrY) / sigma2
- 0.5 * np.sum(np.dot(dXTAcorrX_drho1, L_LAMBDA_LTXTAcorrY)
* L_LAMBDA_LTXTAcorrY) / sigma2)
deriv = np.empty(np.size(param))
deriv[idx_param_sing['Cholesky']] = deriv_L[l_idx]
deriv[idx_param_sing['a1']] = deriv_a1
return -LL, -deriv
def _loglike_AR1_null(self, param, YTY_diag, YTDY_diag, YTFY_diag,
X0TX0, X0TDX0, X0TFX0, X0TY, X0TDY, X0TFY,
n_T, n_V, n_run, n_X0):
# This function calculates the log likelihood of data given AR(1)
# parameters of noise as free parameters.
# Free parameters are in param.
# It serves as a null model which assumes no response to the
# design matrix.
a1 = param
rho1 = 2.0 / np.pi * np.arctan(a1) # auto-regressive coefficients
YTAY = self._make_ar1_quad_form(YTY_diag, YTDY_diag, YTFY_diag, rho1)
# dimension: space,
# A/sigma2 is the inverse of noise covariance matrix in each voxel.
# YTAY means Y'AY
X0TAX0 = X0TX0[None, :, :] - rho1[:, None, None] \
* X0TDX0[None, :, :] \
+ rho1[:, None, None]**2 * X0TFX0[None, :, :]
# dimension: space*#baseline*#baseline
X0TAY = self._make_ar1_quad_form(X0TY, X0TDY, X0TFY, rho1)
# dimension: #baseline*space
# X0TAX0_i = np.linalg.solve(X0TAX0, np.identity(n_X0)[None, :, :])
X0TAX0_i = np.linalg.inv(X0TAX0)
# dimension: space*#baseline*#baseline
YTAcorrY = YTAY - np.sum(X0TAY * np.einsum('ijk,ki->ji',
X0TAX0_i, X0TAY), axis=0)
# dimension: space,
sigma2 = YTAcorrY / (n_T - n_X0)
# dimension: space,
LL = - np.sum(np.log(sigma2)) * (n_T - n_X0) * 0.5 \
+ np.sum(np.log(1 - rho1**2)) * n_run * 0.5 \
- np.sum(self._half_log_det(X0TAX0)) \
- (n_T - n_X0) * n_V * (1 + np.log(2 * np.pi)) * 0.5
# The following are for calculating the derivative to a1
deriv_a1 = np.empty(n_V)
dYTAY_drho1 = self._make_ar1_quad_form_grad(YTDY_diag, YTFY_diag, rho1)
# dimension: space,
dX0TAX0_drho1 = - X0TDX0 \
+ 2 * rho1[:, None, None] * X0TFX0
# dimension: space*rank*rank
dX0TAY_drho1 = self._make_ar1_quad_form_grad(X0TDY, X0TFY, rho1)
# dimension: rank*space
# The following are executed for each voxel.
for i_v in range(n_V):
# All variables with _ele as suffix are for data of just one voxel
invX0TAX0_X0TAY_ele = np.dot(X0TAX0_i[i_v, :, :], X0TAY[:, i_v])
# preparation for the variable below
dYTAcorrY_drho1_ele = dYTAY_drho1[i_v] \
- np.dot(dX0TAY_drho1[:, i_v], invX0TAX0_X0TAY_ele) * 2\
+ np.dot(np.dot(invX0TAX0_X0TAY_ele, dX0TAX0_drho1[i_v, :, :]),
invX0TAX0_X0TAY_ele)
deriv_a1[i_v] = 2 / np.pi / (1 + a1[i_v]**2) \
* (- n_run * rho1[i_v] / (1 - rho1[i_v]**2)
- np.einsum('ij,ij', X0TAX0_i[i_v, :, :],
dX0TAX0_drho1[i_v, :, :]) * 0.5
- dYTAcorrY_drho1_ele * 0.5 / sigma2[i_v])
deriv = deriv_a1
return -LL, -deriv
class GBRSA(BRSA):
"""Group Bayesian representational Similarity Analysis (GBRSA)
Given the time series of neural imaging data in a region of interest
(ROI) and the hypothetical neural response (design matrix) to
each experimental condition of interest,
calculate the shared covariance matrix of
the voxels(recording unit)' response to each condition,
and the relative SNR of each voxels.
The relative SNR could be considered as the degree of contribution
of each voxel to this shared covariance matrix.
A correlation matrix converted from the covariance matrix
will be provided as a quantification of neural representational similarity.
Both tools provide estimation of SNR and noise parameters at the end,
and both tools provide empirical Bayesian estimates of activity patterns
beta, together with weight map of nuisance signals beta0.
The differences of this tool from BRSA are:
(1) It allows fitting a shared covariance matrix (which can be converted
to similarity matrix) across multiple subjects.
This is analogous to SRM under the funcalign submodule. Because of using
multiple subjects, the result is less noisy.
(2) In the fitting process, the SNR and noise parameters are marginalized
for each voxel. Therefore, this tool should be faster than BRSA
when analyzing an ROI of hundreds to thousands of voxels. It does not
provide a spatial smoothness prior on SNR though.
(3) The voxel-wise pseudo-SNR and noise parameters estimated are
posterior mean estimates, while those estimated by BRSA are
maximum a posteriori estimates.
If your goal is to perform searchlight RSA with relatively few voxels
on a single subject, BRSA should be faster. However, GBRSA can in principle
be used together with searchlight in a template space such as MNI.
.. math::
Y = X \\cdot \\beta + X_0 \\cdot \\beta_0 + \\epsilon
\\beta_i \\sim N(0,(s_{i} \\sigma_{i})^2 U)
See also `.BRSA`.
Please note that the model assumes that the covariance matrix U which
all \\beta_i follow is zero-meaned. For more details of its implication,
see documentation of `.BRSA`
Parameters
----------
n_iter : int.
Number of maximum iterations to run the algorithm.
rank : int.
The rank of the covariance matrix.
If not provided, the covariance matrix will be assumed
to be full rank. When you have many conditions
(e.g., calculating the similarity matrix of responses to each event),
you might want to start with specifying a lower rank and use metrics
such as AIC or BIC to decide the optimal rank. The log likelihood
for the fitted data can be retrieved through private attributes
_LL_train\\_. Note that this log likelihood score is only used
here for selecting hyperparameters such as rank. For any formal
model comparison, we recommend using score() function on left-out
data.
auto_nuisance: Boolean.
In order to model spatial correlation between voxels that cannot
be accounted for by common response captured in the design matrix,
we assume that a set of time courses not related to the task
conditions are shared across voxels with unknown amplitudes.
One approach is for users to provide time series which they consider
as nuisance but exist in the noise (such as head motion).
The other way is to take the first n_nureg principal components
in the residual after subtracting the response to the design matrix
from the data, and use these components as the nuisance regressor.
This flag is for the second approach. If turned on,
PCA or factor analysis will be applied to the residuals
to obtain new nuisance regressors in each round of fitting.
These two approaches can be combined. If the users provide nuisance
regressors and set this flag as True, then the first n_nureg
principal components of the residuals after subtracting
both the responses to design matrix and the user-supplied nuisance
regressors will be used in addition to the nuisance regressors
provided by the users.
Note that nuisance regressors are not required from the user. If they are
not provided, DC components for each run will be included as nuisance
regressors regardless of the auto_nuisance parameter.
n_nureg: Optional[int].
Number of nuisance regressors to use in order to model signals
shared across voxels not captured by the design matrix.
This number is in addition to any nuisance regressor that the user
has already provided.
If set to None, the number of nuisance regressors will be
automatically determined based on M Gavish
and D Donoho's approximate estimation of optimal hard
threshold for singular values. (Gavish & Donoho,
IEEE Transactions on Information Theory 60.8 (2014): 5040-5053.)
This only takes effect if auto_nuisance is True.
nureg_zscore: Boolean.
A flag to tell the algorithm whether data is z-scored before
estimating the number of nuisance regressor components necessary to
account for spatial noise correlation. It also determines whether
the residual noise is z-scored before estimating the nuisance
regressors from the residuals.
This only takes effect if auto_nuisance is True.
nureg_method: string, naming a method from sklearn.decomposition.
'PCA', 'ICA', 'FA' or 'SPCA' are currently supported.
The method to estimate the shared component in noise across voxels.
This only takes effect if auto_nuisance is True.
baseline_single: Boolean.
A time course of constant 1 will be included to the nuisance
regressor for each participant. If baseline_single is set to False,
one such regressor is included for each fMRI run, but at the end of
fitting, a single component in beta0\\_ will be computed as the average
of the weight maps corresponding to these regressors. This might
cause underestimation of noise variance.
If baseline_single is True, only one regressor of constant 1 will be
used for the whole dataset. This might be desirable if you
believe the average image intensity might not scale with the
same proportion for different voxels across scan. In other words,
it is possible that some part of the brain is more vulnerable to
change in baseline intensity due to facts such as
field inhomogeneity. Setting baseline_single to True will force the
nuisance regressors automatically estimated from residuals to
capture this. However, when each task condition only occurs in one
run and when the design matrix in each run sums together close to
a flat line, this option can cause the estimated similarity to be
extremely high between conditions occurring in the same run.
SNR_prior: string.
The type of prior for pseudo-SNR.
If set to 'exp', truncated exponential distribution with scale
parameter of 1 is imposed on pseudo-SNR.
If set to 'lognorm', a truncated log normal prior is imposed.
In this case, the standard deviation of log(SNR) is set
by the parameter logS_range.
If set to 'unif', a uniform prior in [0,1] is imposed.
In all above cases, SNR is numerically
marginalized on a grid of parameters. So the parameter SNR_bins
determines how accurate the numerical integration is. The more
bins are used, the more accurate the numerical
integration becomes.
If set to 'equal', all voxels are assumed to have the same fixed
SNR. Pseudo-SNR is 1.0 for all voxels.
In all the cases, the grids used for pseudo-SNR do not really
set an upper bound for SNR, because the real SNR is determined
by both pseudo-SNR and U, the shared covariance structure.
logS_range: float.
The reasonable range of the spread of SNR in log scale.
This parameter only takes effect if SNR_prior is set to 'lognorm'.
It is effectively the `s` parameter of `scipy.stats.lognorm`,
or the standard deviation of the distribution in log scale.
logS_range specifies how variable you believe the SNRs
to vary across voxels in log scale.
This range should not be set too large, otherwise the fitting
may encounter numerical issue.
If it is set too small, the estimated SNRs will turn out to be too
close to each other and the estimated similarity matrix might
overfit to voxels of low SNR.
If you increase logS_range, it is recommended to increase
SNR_bins accordingly, otherwise the pseudo-SNR values evaluated might
be too sparse, causing the posterior pseudo-SNR estimations
to be clustered around the bins.
SNR_bins: integer.
The number of bins used to numerically marginalize the pseudo-SNR
parameter. In general, you should choose a number large enough
that the fitting result no longer changes appreciably with the
number of bins. However, a very large number of bins also causes
slower computation and larger memory consumption.
For SNR_prior='lognorm', the default value 21 is based on
the default value of logS_range=1.0 and bin width of 0.3 on log scale.
But it is also a reasonable choice for the other two options
for SNR_prior.
rho_bins: integer.
The number of bins to divide the region of (-1, 1) for rho.
This only takes effect for fitting the marginalized version.
If set to 20, discrete numbers of {-0.95, -0.85, ..., 0.95} will
be used to numerically integrate rho from -1 to 1.
optimizer: str or callable.
The optimizer to use for minimizing the cost function. Any method
accepted by scipy.optimize.minimize can be used.
We use 'L-BFGS-B' as a default. Users can try other strings
corresponding to optimizers provided by scipy.optimize.minimize,
such as 'BFGS' or 'CG', or pass a callable custom optimizer.
Note that BRSA fits a lot of parameters. So a chosen optimizer
should accept gradient (Jacobian) of the cost function. Otherwise
the fitting is likely to be unbearably slow. We do not calculate
Hessian of the objective function. So an optimizer which requires
Hessian cannot be used.
minimize_options: dictionary.
This is the dictionary passed as the options argument to
scipy.optimize.minimize, which minimizes the cost function during
fitting. Notice that the minimization is performed for up to
n_iter times, with the nuisance regressor re-estimated each time.
So within each of the n_iter steps of fitting,
scipy.optimize.minimize does not need to fully converge. The key
'maxiter' in this dictionary determines the maximum number of
iteration done by scipy.optimize.minimize within each of the n_iter
steps of fitting.
tol: float.
Tolerance parameter passed to scipy.optimize.minimize. It is also
used for determining convergence of the alternating fitting
procedure.
random_state : RandomState or an int seed.
A random number generator instance to define the state of
the random permutations generator whenever the module
needs to generate random number (e.g., initial parameter
of the Cholesky factor).
anneal_speed: float.
Annealing is introduced in fitting of the Cholesky
decomposition of the shared covariance matrix. The amount
of perturbation decays exponentially. This parameter sets
the ratio of the maximum number of iteration to the
time constant of the exponential.
anneal_speed=10 means by n_iter/10 iterations,
the amount of perturbation is reduced by a factor of e (about 2.718).
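As an illustrative sketch (mirroring the annealing term used in the
fitting code below, not an exact guarantee), the standard deviation
of the random perturbation at iteration it decays approximately as
exp(- anneal_speed * it / n_iter),
so with anneal_speed=10 it shrinks by a factor of e every
n_iter / 10 iterations.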
Attributes
----------
U_ : numpy array, shape=[condition,condition].
The shared covariance matrix
L_ : numpy array, shape=[condition,condition].
The Cholesky factor of the shared covariance matrix
(lower-triangular matrix).
C_: numpy array, shape=[condition,condition].
The correlation matrix derived from the shared covariance matrix.
This is the estimated similarity matrix between neural patterns
to your task conditions. Notice that it is recommended that
you also check U\\_, which is the covariance matrix underlying
this correlation matrix. In cases there is almost no response
to your task conditions, the diagonal values of U\\_ would become
very small and C\\_ might contain many correlation coefficients
close to 1 or -1. This might not reflect true strong correlation
or strong negative correlation, but a result of lack of
task-related neural activity, design matrix that does not match
true neural response, or not enough data.
It is also recommended to check nSNR\\_ after mapping it back to
the brain. A "reasonable" map should at least have higher values
in gray matter than in white matter.
nSNR_ : list of numpy arrays, shape=[voxels,] for each subject in the list.
The pseudo-SNR of all voxels. If SNR_prior='lognorm',
the geometric mean of nSNR\\_ would be approximately 1.
If SNR_prior='unif', all nSNR\\_ would be in the range of (0,1).
If SNR_prior='exp' (default), the range of values would vary
depending on the data and SNR_bins, but many should have low
values with few voxels with high values.
Note that this attribute can not be interpreted as true SNR,
but the relative ratios between voxels indicate the contribution
of each voxel to the representational similarity structure.
sigma_ : list of numpy arrays, shape=[voxels,] for each subject.
The estimated standard deviation of the noise in each voxel.
Assuming an AR(1) model, this is the standard deviation
of the innovation noise.
rho_ : list of numpy arrays, shape=[voxels,] for each subject.
The estimated autoregressive coefficient of each voxel
beta_: list of numpy arrays, shape=[conditions, voxels] for each subject.
The posterior mean estimation of the response amplitudes
of each voxel to each task condition.
beta0_: list of numpy arrays, shape=[n_nureg + n_base, voxels]
for each subject.
The loading weights of each voxel for the shared time courses
not captured by the design matrix.
n_base is the number of columns of the user-supplied nuisance
regressors plus one for DC component.
X0_: list of numpy arrays, shape=[time_points, n_nureg + n_base]
for each subject.
The estimated time course that is shared across voxels but
unrelated to the events of interest (design matrix).
beta0_null_: list of numpy arrays, shape=[n_nureg + n_base, voxels]
for each subject.
The equivalent of beta0\\_ in a null model which does not
include the design matrix and response pattern beta
X0_null_: list of numpy arrays, shape=[time_points, n_nureg + n_base]
for each subject.
The equivalent of X0\\_ in a null model which does not
include the design matrix and response pattern beta
n_nureg_: 1-d numpy array
Number of nuisance regressor used to model the spatial noise
correlation of each participant.
random_state_: `RandomState`
Random number generator initialized using random_state.
"""
def __init__(
self, n_iter=100, rank=None,
auto_nuisance=True, n_nureg=None, nureg_zscore=True,
nureg_method='PCA',
baseline_single=False, logS_range=1.0, SNR_prior='exp',
SNR_bins=21, rho_bins=20, tol=1e-4, optimizer='L-BFGS-B',
minimize_options={'gtol': 1e-4, 'disp': False,
'maxiter': 20}, random_state=None,
anneal_speed=10):
self.n_iter = n_iter
self.rank = rank
self.auto_nuisance = auto_nuisance
self.n_nureg = n_nureg
self.nureg_zscore = nureg_zscore
if auto_nuisance:
assert (n_nureg is None) \
or (isinstance(n_nureg, int) and n_nureg > 0), \
'n_nureg should be a positive integer or None'\
' if auto_nuisance is True.'
if self.nureg_zscore:
self.preprocess_residual = lambda x: _zscore(x)
else:
self.preprocess_residual = lambda x: x
if nureg_method == 'FA':
self.nureg_method = lambda x: FactorAnalysis(n_components=x)
elif nureg_method == 'PCA':
self.nureg_method = lambda x: PCA(n_components=x, whiten=True)
elif nureg_method == 'SPCA':
self.nureg_method = lambda x: SparsePCA(n_components=x,
max_iter=20, tol=tol)
elif nureg_method == 'ICA':
self.nureg_method = lambda x: FastICA(n_components=x,
whiten=True)
else:
raise ValueError('nureg_method can only be FA, PCA, '
'SPCA(for sparse PCA) or ICA')
self.baseline_single = baseline_single
if type(logS_range) is int:
logS_range = float(logS_range)
self.logS_range = logS_range
assert SNR_prior in ['unif', 'lognorm', 'exp', 'equal'], \
'SNR_prior can only be chosen from ''unif'', ''lognorm''' \
' ''exp'' and ''equal'''
self.SNR_prior = SNR_prior
if self.SNR_prior == 'equal':
self.SNR_bins = 1
else:
self.SNR_bins = SNR_bins
self.rho_bins = rho_bins
self.tol = tol
self.optimizer = optimizer
self.minimize_options = minimize_options
self.random_state = random_state
self.anneal_speed = anneal_speed
return
def fit(self, X, design, nuisance=None, scan_onsets=None):
""" Fit the model to data of all participants jointly.
Parameters
----------
X: list of numpy arrays, shape=[time_points, voxels] for each entry.
Data to be fitted. Each participant corresponds to one item in
the list. If you have multiple scans of the same participants
that you want to analyze together, you should concatenate them
along the time dimension after proper preprocessing (e.g. spatial
alignment), and specify the onsets of each scan in scan_onsets.
design: list of numpy arrays, shape=[time_points, conditions] for each.
This is the design matrix of each participant.
It should only include the hypothetic response for task conditions.
You should not include regressors for a DC component or
motion parameters, unless you have a strong reason.
If you want to model head motion, you should include them
in nuisance regressors.
If you have multiple runs, the design matrix
of all runs should be concatenated along the time dimension for
each participant, with every column for one condition across runs.
If the design matrix is the same for all subjects,
either provide a list as required, or provide a single numpy array.
nuisance: optional, list of numpy arrays,
shape=[time_points, nuisance_factors] for each subject in the list.
Nuisance regressors of each participant.
The responses to these regressors will be marginalized out from
each voxel, which means they are considered, but won't be assumed
to share the same pseudo-SNR map with the design matrix.
Therefore, the pseudo-SNR map will only reflect the
relative contribution of design matrix to each voxel.
You can provide time courses such as those for head motion
to this parameter.
Note that if auto_nuisance is set to True, the first
n_nureg principal components of residual (excluding the response
to the design matrix and the user-provided nuisance regressors)
will be included as additional nuisance regressor after the
first round of fitting.
If auto_nuisance is set to False, the nuisance regressors supplied
by the users together with DC components will be used as
nuisance time series.
scan_onsets: optional, list of numpy arrays, shape=[runs,] for each.
Each item in the list specifies the indices of X which correspond
to the onset of each scanning run for one participant.
For example, if you have two experimental runs of
the first participant, each with 100 TRs, and one run of the
second participant, with 150 TRs, then scan_onsets should be
[ndarray([0, 100]), ndarray([0])], because the onsets index into
each participant's own time dimension.
The effect of this argument is to make the inverse matrix
of the temporal covariance matrix of noise block-diagonal.
If you do not provide the argument, the program will
assume all data are from the same run for each participant.
"""
logger.info('Running Group Bayesian RSA (which can also analyze'
' data of a single participant). Voxel-specific parameters'
' are all marginalized.')
self.random_state_ = check_random_state(self.random_state)
# setting random seed
logger.debug('RandState set to {}'.format(self.random_state_))
# Checking all inputs.
X = self._check_data_GBRSA(X)
design = self._check_design_GBRSA(design, X)
nuisance = self._check_nuisance_GBRSA(
copy.deepcopy(nuisance), X)
# The reason that we use copy of nuisance is because they
# may be modified inside our code.
scan_onsets = self._check_scan_onsets_GBRSA(scan_onsets, X)
# Run Marginalized Bayesian RSA
# Note that we have a change of notation here.
# Within _fit_RSA_marginalized, design matrix is named X
# and data is named Y, to reflect the
# generative model that data Y is generated by mixing the response
# X to experiment conditions and other neural activity.
# However, in fit(), we keep the scikit-learn API that
# X is the input data to fit and y, a reserved name not used, is
# the label to map to from X.
assert self.SNR_bins >= 10 and self.SNR_prior != 'equal' or \
self.SNR_bins == 1 and self.SNR_prior == 'equal', \
'At least 10 bins are required to perform the numerical'\
' integration over SNR, unless choosing SNR_prior=''equal'','\
' in which case SNR_bins should be 1.'
assert self.rho_bins >= 10, \
'At least 10 bins are required to perform the numerical'\
' integration over rho'
assert self.logS_range * 6 / self.SNR_bins < 0.5 \
or self.SNR_prior != 'lognorm', \
'The minimum grid of log(SNR) should not be larger than 0.5 '\
'if log normal prior is chosen for SNR.' \
' Please consider increasing SNR_bins or reducing logS_range'
self.n_subj_ = len(X)
self.n_V_ = [None] * self.n_subj_
for subj, x in enumerate(X):
self.n_V_[subj] = x.shape[1]
if self.auto_nuisance:
if self.n_nureg is None:
logger.info('numbers of nuisance regressors are determined '
'automatically.')
n_runs = np.zeros(self.n_subj_)
n_comps = np.ones(self.n_subj_)
for s_id in np.arange(self.n_subj_):
# For each subject, determine the number of nuisance
# regressors needed to account for the covariance
# in residuals.
# Residual is calculated by regressing
# out the design matrix and DC component and linear trend
# from data of each run.
run_TRs, n_runs[s_id] = self._run_TR_from_scan_onsets(
X[s_id].shape[0], scan_onsets[s_id])
ts_dc = self._gen_legendre(run_TRs, [0])
_, ts_base, _ = self._merge_DC_to_base(
ts_dc, nuisance[s_id], False)
ts_reg = np.concatenate((ts_base, design[s_id]), axis=1)
beta_hat = np.linalg.lstsq(ts_reg, X[s_id], rcond=None)[0]
residuals = X[s_id] - np.dot(ts_reg, beta_hat)
n_comps[s_id] = np.min(
[np.max([Ncomp_SVHT_MG_DLD_approx(
residuals, self.nureg_zscore), 1]),
np.linalg.matrix_rank(residuals) - 1])
# n_nureg_ should not exceed the rank of
# residual minus 1.
self.n_nureg_ = n_comps
logger.info('Use {} nuisance regressors to model the spatial '
'correlation in noise.'.format(self.n_nureg_))
else:
self.n_nureg_ = self.n_nureg * np.ones(self.n_subj_)
self.n_nureg_ = np.int32(self.n_nureg_)
self.beta0_null_, self.sigma_null_, self.rho_null_, self.X0_null_,\
self._LL_null_train_ = self._fit_RSA_marginalized_null(
Y=X, X_base=nuisance, scan_onsets=scan_onsets)
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self.sigma_, self.rho_, self.X0_, self._LL_train_ = \
self._fit_RSA_marginalized(
X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets)
self.C_ = utils.cov2corr(self.U_)
self.design_ = design.copy()
self._rho_design_ = [None] * self.n_subj_
self._sigma2_design_ = [None] * self.n_subj_
self._rho_X0_ = [None] * self.n_subj_
self._sigma2_X0_ = [None] * self.n_subj_
self._rho_X0_null_ = [None] * self.n_subj_
self._sigma2_X0_null_ = [None] * self.n_subj_
for subj in np.arange(self.n_subj_):
self._rho_design_[subj], self._sigma2_design_[subj] = \
self._est_AR1(self.design_[subj], same_para=True)
self._rho_X0_[subj], self._sigma2_X0_[subj] = \
self._est_AR1(self.X0_[subj])
self._rho_X0_null_[subj], self._sigma2_X0_null_[subj] =\
self._est_AR1(self.X0_null_[subj])
# AR(1) parameters of the design matrix and nuisance regressors,
# which will be used in transform or score.
return self
def transform(self, X, y=None, scan_onsets=None):
""" Use the model to estimate the time course of response to
each condition (ts), and the time course unrelated to task
(ts0) which is spread across the brain.
This is equivalent to "decoding" the design matrix and
nuisance regressors from a new dataset different from the
training dataset on which fit() was applied. An AR(1) smooth
prior is imposed on the decoded ts and ts0 with the AR(1)
parameters learnt from the corresponding time courses in the
training data.
Parameters
----------
X : list of 2-D arrays. For each item, shape=[time_points, voxels]
New fMRI data of the same subjects. The voxels should
match those used in the fit() function.
The size of the list should match the size of the list X fed
to fit(), with each item in the list corresponding to data
from the same subject in the X fed to fit(). If you do not
need to transform some subjects' data, leave the entry
corresponding to that subject as None.
If data are z-scored when fitting the model,
data should be z-scored as well when calling transform()
y : not used (as it is unsupervised learning)
scan_onsets : list of 1-D numpy arrays,
Each array corresponds to the onsets of
scans in the data X for the particular subject.
If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ts : list of 2-D arrays. For each, shape = [time_points, condition]
The estimated response to the cognitive dimensions
(task dimensions) whose response amplitudes were estimated
during the fit step.
One item for each subject. If some subjects' data are
not provided, None will be returned.
ts0: list of 2-D array. For each, shape = [time_points, n_nureg]
The estimated time courses spread across the brain, with the
loading weights estimated during the fit step.
One item for each subject. If some subjects' data are
not provided, None will be returned.
"""
X = self._check_data_GBRSA(X, for_fit=False)
scan_onsets = self._check_scan_onsets_GBRSA(scan_onsets, X)
assert len(X) == self.n_subj_
ts = [None] * self.n_subj_
ts0 = [None] * self.n_subj_
log_p = [None] * self.n_subj_
for i, x in enumerate(X):
if x is not None:
s = scan_onsets[i]
ts[i], ts0[i], log_p[i] = self._transform(
Y=x, scan_onsets=s, beta=self.beta_[i],
beta0=self.beta0_[i], rho_e=self.rho_[i],
sigma_e=self.sigma_[i], rho_X=self._rho_design_[i],
sigma2_X=self._sigma2_design_[i],
rho_X0=self._rho_X0_[i], sigma2_X0=self._sigma2_X0_[i])
return ts, ts0
def score(self, X, design, scan_onsets=None):
""" After fit() is applied to the data of a group of participants,
use the parameters estimated by the fit() function to evaluate
the log likelihood of new data from the same participants,
given these estimated parameters.
Design matrices of the same set of experimental
conditions in the testing data should be provided, with each
column corresponding to the same condition as that column
in the design matrix of the training data.
Unknown nuisance time series will be marginalized, assuming
they follow the same spatial pattern as in the training
data. The hypothetical response captured by the design matrix
will be subtracted from data before the marginalization
when evaluating the log likelihood. For null model,
nothing will be subtracted before marginalization.
There is a difference between the form of likelihood function
used in fit() and score(). In fit(), the response amplitude
beta to design matrix X and the modulation beta0 by nuisance
regressor X0 are both marginalized, with X provided and X0
estimated from data. In score(), posterior estimation of
beta and beta0 from the fitting step are assumed unchanged
in testing data; X is assumed given by the user,
and X0 is marginalized.
The logic underlying score() is to transfer
as much as what we can learn from training data when
calculating a likelihood score for testing data. This is done
at the cost of using point estimation for beta and beta0.
If you z-scored your data during fit step, you should
z-score them for score function as well. If you did not
z-score in fitting, you should not z-score here either.
Parameters
----------
X : List of 2-D arrays. For each item, shape=[time_points, voxels]
fMRI data of new data of the same participants.
The voxels of each participants should
match those used in the fit() function. If data are z-scored
(recommended) when fitting the model, data should be z-scored
as well when calling score().
design : List of 2-D arrays. shape=[time_points, conditions] for each
Each corresponds to one participant.
Design matrices expressing the hypothetical response of
the task conditions in data X.
scan_onsets : List of 1-D arrays, shape=[#fMRI runs] for each
Each array corresponds to one participant.
Lists of indices corresponding to the onsets of
scans in the data X.
If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ll: list, shape=[number of participants]
The log likelihoods of the new data based on the model and its
parameters fit to the training data.
If data of some participants are not provided, the corresponding
entry will be None.
ll_null: list, shape=[number of participants]
The log likelihood of the new data based on a null model
which assumes the same as the full model for everything
except for that there is no response to any of the
task conditions.
"""
X = self._check_data_GBRSA(X, for_fit=False)
scan_onsets = self._check_scan_onsets_GBRSA(scan_onsets, X)
design = self._check_design_GBRSA(design, X)
assert len(X) == self.n_subj_
ll = [None] * self.n_subj_
ll_null = [None] * self.n_subj_
for subj in np.arange(self.n_subj_):
if X[subj] is not None:
ll[subj] = self._score(
Y=X[subj], design=design[subj], beta=self.beta_[subj],
scan_onsets=scan_onsets[subj], beta0=self.beta0_[subj],
rho_e=self.rho_[subj], sigma_e=self.sigma_[subj],
rho_X0=self._rho_X0_[subj],
sigma2_X0=self._sigma2_X0_[subj])
ll_null[subj] = self._score(
Y=X[subj], design=None, beta=None,
scan_onsets=scan_onsets[subj], beta0=self.beta0_[subj],
rho_e=self.rho_[subj], sigma_e=self.sigma_[subj],
rho_X0=self._rho_X0_[subj],
sigma2_X0=self._sigma2_X0_[subj])
return ll, ll_null
def _precompute_ar1_quad_forms_marginalized(
self, XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
rho1, n_V, n_X0):
# Calculate the sandwich terms which put Acorr between X, Y and X0
# These terms are used a lot in the likelihood. This function
# is used for the marginalized version.
XTAY = XTY - rho1[:, None, None] * XTDY \
+ rho1[:, None, None]**2 * XTFY
# dimension: #rho*feature*space
YTAY_diag = YTY_diag - rho1[:, None] * YTDY_diag \
+ rho1[:, None]**2 * YTFY_diag
# dimension: #rho*space,
# A/sigma2 is the inverse of noise covariance matrix in each voxel.
# YTAY means Y'AY
XTAX = XTX - rho1[:, None, None] * XTDX \
+ rho1[:, None, None]**2 * XTFX
# dimension: n_rho*feature*feature
X0TAX0 = X0TX0[None, :, :] - rho1[:, None, None] \
* X0TDX0[None, :, :] \
+ rho1[:, None, None]**2 * X0TFX0[None, :, :]
# dimension: #rho*#baseline*#baseline
XTAX0 = XTX0[None, :, :] - rho1[:, None, None] \
* XTDX0[None, :, :] \
+ rho1[:, None, None]**2 * XTFX0[None, :, :]
# dimension: n_rho*feature*#baseline
X0TAY = X0TY - rho1[:, None, None] * X0TDY \
+ rho1[:, None, None]**2 * X0TFY
# dimension: #rho*#baseline*space
X0TAX0_i = np.linalg.solve(X0TAX0, np.identity(n_X0)[None, :, :])
# dimension: #rho*#baseline*#baseline
XTAcorrX = XTAX
# dimension: #rho*feature*feature
XTAcorrY = XTAY
# dimension: #rho*feature*space
YTAcorrY_diag = YTAY_diag
for i_r in range(np.size(rho1)):
XTAcorrX[i_r, :, :] -= \
np.dot(np.dot(XTAX0[i_r, :, :], X0TAX0_i[i_r, :, :]),
XTAX0[i_r, :, :].T)
XTAcorrY[i_r, :, :] -= np.dot(np.dot(XTAX0[i_r, :, :],
X0TAX0_i[i_r, :, :]),
X0TAY[i_r, :, :])
YTAcorrY_diag[i_r, :] -= np.sum(
X0TAY[i_r, :, :] * np.dot(X0TAX0_i[i_r, :, :],
X0TAY[i_r, :, :]), axis=0)
return X0TAX0, X0TAX0_i, XTAcorrX, XTAcorrY, YTAcorrY_diag, \
X0TAY, XTAX0
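# A compact restatement of what _precompute_ar1_quad_forms_marginalized
# computes, for readers following the math: for each grid value of rho,
# A(rho) is proportional to I - rho * D + rho**2 * F, so for instance
#     X'A(rho)Y = X'Y - rho * X'DY + rho**2 * X'FY
# and the "corr" quantities additionally project out the nuisance
# regressors X0, e.g.
#     X'AcorrY = X'AY - X'AX0 (X0'AX0)^{-1} X0'AY
# evaluated separately at every rho grid point.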
def _fit_RSA_marginalized(self, X, Y, X_base,
scan_onsets=None):
""" The major utility of fitting Bayesian RSA
(marginalized version).
Note that there is a naming change of variable. X in fit()
is changed to Y here, and design in fit() is changed to X here.
This is because we follow the tradition that X expresses the
variable defined (controlled) by the experimenter, i.e., the
time course of experimental conditions convolved by an HRF,
and Y expresses data.
However, in wrapper function fit(), we follow the naming
routine of scikit-learn.
"""
rank = self.rank
n_subj = len(Y)
n_V = [np.size(y, axis=1) for y in Y]
n_T = [np.size(y, axis=0) for y in Y]
n_C = np.size(X[0], axis=1)
l_idx, rank = self._chol_idx(n_C, rank)
n_l = np.size(l_idx[0]) # the number of parameters for L
t_start = time.time()
logger.info('Starting to fit the model. Maximum iteration: '
'{}.'.format(self.n_iter))
# log_SNR_grids, SNR_weights \
# = np.polynomial.hermite.hermgauss(SNR_bins)
# SNR_weights = SNR_weights / np.pi**0.5
# SNR_grids = np.exp(log_SNR_grids * self.logS_range * 2**.5)
SNR_grids, SNR_weights = self._set_SNR_grids()
logger.info('The grids of pseudo-SNR used for numerical integration '
'is {}.'.format(SNR_grids))
assert np.max(SNR_grids) < 1e10, \
'ATTENTION!! The range of grids of pseudo-SNR' \
' to be marginalized is too large. Please ' \
'consider reducing logS_range to 1 or 2'
rho_grids, rho_weights = self._set_rho_grids()
logger.info('The grids of rho used to do numerical integration '
'is {}.'.format(rho_grids))
n_grid = self.SNR_bins * self.rho_bins
log_weights = np.reshape(
np.log(SNR_weights[:, None]) + np.log(rho_weights), n_grid)
all_rho_grids = np.reshape(np.repeat(
rho_grids[None, :], self.SNR_bins, axis=0), n_grid)
all_SNR_grids = np.reshape(np.repeat(
SNR_grids[:, None], self.rho_bins, axis=1), n_grid)
# Prepare the data for fitting. These pre-calculated matrices
# will be re-used a lot in evaluating likelihood function and
# gradient.
D = [None] * n_subj
F = [None] * n_subj
run_TRs = [None] * n_subj
n_run = [None] * n_subj
XTY = [None] * n_subj
XTDY = [None] * n_subj
XTFY = [None] * n_subj
YTY_diag = [None] * n_subj
YTDY_diag = [None] * n_subj
YTFY_diag = [None] * n_subj
XTX = [None] * n_subj
XTDX = [None] * n_subj
XTFX = [None] * n_subj
X0TX0 = [None] * n_subj
X0TDX0 = [None] * n_subj
X0TFX0 = [None] * n_subj
XTX0 = [None] * n_subj
XTDX0 = [None] * n_subj
XTFX0 = [None] * n_subj
X0TY = [None] * n_subj
X0TDY = [None] * n_subj
X0TFY = [None] * n_subj
X0 = [None] * n_subj
X_res = [None] * n_subj
n_X0 = [None] * n_subj
idx_DC = [None] * n_subj
log_fixed_terms = [None] * n_subj
# Initialization for L.
# There are several possible ways of initializing the covariance.
# (1) start from the point estimation of covariance
cov_point_est = np.zeros((n_C, n_C))
for subj in range(n_subj):
D[subj], F[subj], run_TRs[subj], n_run[subj] = self._prepare_DF(
n_T[subj], scan_onsets=scan_onsets[subj])
XTY[subj], XTDY[subj], XTFY[subj], YTY_diag[subj], \
YTDY_diag[subj], YTFY_diag[subj], XTX[subj], XTDX[subj], \
XTFX[subj] = self._prepare_data_XY(
X[subj], Y[subj], D[subj], F[subj])
# The contents above stay fixed during fitting.
# Initializing X0 as DC baseline
# DC component will be added to the nuisance regressors.
# In later steps, we do not need to add DC components again
X0TX0[subj], X0TDX0[subj], X0TFX0[subj], XTX0[subj], XTDX0[subj], \
XTFX0[subj], X0TY[subj], X0TDY[subj], X0TFY[subj], X0[subj], \
X_base[subj], n_X0[subj], idx_DC[subj] = \
self._prepare_data_XYX0(
X[subj], Y[subj], X_base[subj], None, D[subj], F[subj],
run_TRs[subj], no_DC=False)
X_joint = np.concatenate((X0[subj], X[subj]), axis=1)
beta_hat = np.linalg.lstsq(X_joint, Y[subj], rcond=None)[0]
residual = Y[subj] - np.dot(X_joint, beta_hat)
# point estimates of betas and fitting residuals without assuming
# the Bayesian model underlying RSA.
cov_point_est += np.cov(beta_hat[n_X0[subj]:, :]
/ np.std(residual, axis=0))
log_fixed_terms[subj] = - (n_T[subj] - n_X0[subj]) \
/ 2 * np.log(2 * np.pi) + n_run[subj] \
/ 2 * np.log(1 - all_rho_grids**2) \
+ scipy.special.gammaln(
(n_T[subj] - n_X0[subj] - 2) / 2) \
+ (n_T[subj] - n_X0[subj] - 2) / 2 * np.log(2)
# These are terms in the log likelihood that do not
# depend on L. Notice that the last term comes from
# the term of marginalizing sigma. We take the 2 in
# the denominator out. Accordingly, the "denominator"
# variable in the _raw_loglike_grids() function is not
# divided by 2
cov_point_est = cov_point_est / n_subj
current_vec_U_chlsk_l = np.linalg.cholesky(
(cov_point_est + np.eye(n_C)) / 2)[l_idx]
# We use the average of covariance of point estimation and an identity
# matrix as the initial value of the covariance matrix, just in case
# the user provides data in which n_V is smaller than n_C.
# (2) start from identity matrix
# current_vec_U_chlsk_l = np.eye(n_C)[l_idx]
# (3) random initialization
# current_vec_U_chlsk_l = self.random_state_.randn(n_l)
# vectorized version of L, Cholesky factor of U, the shared
# covariance matrix of betas across voxels.
L = np.zeros((n_C, rank))
L[l_idx] = current_vec_U_chlsk_l
X0TAX0 = [None] * n_subj
X0TAX0_i = [None] * n_subj
XTAcorrX = [None] * n_subj
s2XTAcorrX = [None] * n_subj
YTAcorrY_diag = [None] * n_subj
XTAcorrY = [None] * n_subj
sXTAcorrY = [None] * n_subj
X0TAY = [None] * n_subj
XTAX0 = [None] * n_subj
half_log_det_X0TAX0 = [None] * n_subj
s_post = [None] * n_subj
rho_post = [None] * n_subj
sigma_post = [None] * n_subj
beta_post = [None] * n_subj
beta0_post = [None] * n_subj
# The contents below can be updated during fitting.
# e.g., X0 will be re-estimated
logger.info('start real fitting')
LL = np.zeros(n_subj)
for it in range(self.n_iter):
logger.info('Iteration {}'.format(it))
# Re-estimate part of X0: X_res
for subj in range(n_subj):
if self.auto_nuisance and it > 0:
residuals = Y[subj] - np.dot(X[subj], beta_post[subj]) \
- np.dot(
X_base[subj],
beta0_post[subj][:np.shape(X_base[subj])[1], :])
X_res[subj] = self.nureg_method(
self.n_nureg_[subj]).fit_transform(
self.preprocess_residual(residuals))
X0TX0[subj], X0TDX0[subj], X0TFX0[subj], XTX0[subj],\
XTDX0[subj], XTFX0[subj], X0TY[subj], X0TDY[subj], \
X0TFY[subj], X0[subj], X_base[subj], n_X0[subj], _ = \
self._prepare_data_XYX0(
X[subj], Y[subj], X_base[subj], X_res[subj],
D[subj], F[subj], run_TRs[subj], no_DC=True)
X0TAX0[subj], X0TAX0_i[subj], XTAcorrX[subj], XTAcorrY[subj],\
YTAcorrY_diag[subj], X0TAY[subj], XTAX0[subj] \
= self._precompute_ar1_quad_forms_marginalized(
XTY[subj], XTDY[subj], XTFY[subj], YTY_diag[subj],
YTDY_diag[subj], YTFY_diag[subj], XTX[subj],
XTDX[subj], XTFX[subj], X0TX0[subj], X0TDX0[subj],
X0TFX0[subj], XTX0[subj], XTDX0[subj], XTFX0[subj],
X0TY[subj], X0TDY[subj], X0TFY[subj], rho_grids,
n_V[subj], n_X0[subj])
# Now we expand to another dimension including SNR
# and collapse the dimension again.
half_log_det_X0TAX0[subj], X0TAX0[subj], X0TAX0_i[subj], \
s2XTAcorrX[subj], YTAcorrY_diag[subj], sXTAcorrY[subj], \
X0TAY[subj], XTAX0[subj] = self._matrix_flattened_grid(
X0TAX0[subj], X0TAX0_i[subj], SNR_grids,
XTAcorrX[subj], YTAcorrY_diag[subj], XTAcorrY[subj],
X0TAY[subj], XTAX0[subj], n_C, n_V[subj], n_X0[subj],
n_grid)
res = scipy.optimize.minimize(
self._sum_loglike_marginalized, current_vec_U_chlsk_l
+ self.random_state_.randn(n_l) *
np.linalg.norm(current_vec_U_chlsk_l)
/ n_l**0.5 * np.exp(-it / self.n_iter
* self.anneal_speed - 1),
args=(s2XTAcorrX, YTAcorrY_diag, sXTAcorrY,
half_log_det_X0TAX0,
log_weights, log_fixed_terms,
l_idx, n_C, n_T, n_V, n_X0,
n_grid, rank),
method=self.optimizer, jac=True, tol=self.tol,
options=self.minimize_options)
param_change = res.x - current_vec_U_chlsk_l
current_vec_U_chlsk_l = res.x.copy()
# Estimating a few parameters.
L[l_idx] = current_vec_U_chlsk_l
for subj in range(n_subj):
LL_raw, denominator, L_LAMBDA, L_LAMBDA_LT = \
self._raw_loglike_grids(
L, s2XTAcorrX[subj], YTAcorrY_diag[subj],
sXTAcorrY[subj], half_log_det_X0TAX0[subj],
log_weights, log_fixed_terms[subj], n_C, n_T[subj],
n_V[subj], n_X0[subj], n_grid, rank)
result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw)
LL[subj] = np.sum(np.log(result_sum) + max_value)
weight_post = result_exp / result_sum
s_post[subj] = np.sum(all_SNR_grids[:, None] * weight_post,
axis=0)
# Mean-posterior estimate of SNR.
rho_post[subj] = np.sum(all_rho_grids[:, None] * weight_post,
axis=0)
# Mean-posterior estimate of rho.
sigma_means = denominator ** 0.5 \
* (np.exp(scipy.special.gammaln(
(n_T[subj] - n_X0[subj] - 3) / 2)
- scipy.special.gammaln(
(n_T[subj] - n_X0[subj] - 2) / 2)) / 2**0.5)
sigma_post[subj] = np.sum(sigma_means * weight_post, axis=0)
# The mean of inverse-Gamma distribution is beta/(alpha-1)
# The mode is beta/(alpha+1). Notice that beta here does not
# refer to the brain activation, but the scale parameter of
# inverse-Gamma distribution. In the _UV version, we use the
# maximum likelihood estimate of sigma^2. So we divide by
# (alpha+1), which is (n_T - n_X0).
beta_post[subj] = np.zeros((n_C, n_V[subj]))
beta0_post[subj] = np.zeros((n_X0[subj], n_V[subj]))
for grid in range(n_grid):
beta_post[subj] += np.dot(L_LAMBDA_LT[grid, :, :],
sXTAcorrY[subj][grid, :, :])\
* all_SNR_grids[grid] \
* weight_post[grid, :]
beta0_post[subj] += weight_post[grid, :] * np.dot(
X0TAX0_i[subj][grid, :, :],
(X0TAY[subj][grid, :, :]
- np.dot(np.dot(XTAX0[subj][grid, :, :].T,
L_LAMBDA_LT[grid, :, :]),
sXTAcorrY[subj][grid, :, :])
* all_SNR_grids[grid]))
if np.max(np.abs(param_change)) < self.tol:
logger.info('The change of parameters is smaller than '
'the tolerance value {}. Fitting is finished '
'after {} iterations'.format(self.tol, it + 1))
break
for subj in range(n_subj):
if idx_DC[subj].size > 1:
collapsed_DC = np.sum(X0[subj][:, idx_DC[subj]], axis=1)
X0[subj] = np.insert(np.delete(X0[subj], idx_DC[subj], axis=1),
0, collapsed_DC, axis=1)
collapsed_beta0 = np.mean(beta0_post[subj][idx_DC[subj], :],
axis=0)
beta0_post[subj] = np.insert(
np.delete(beta0_post[subj], idx_DC[subj], axis=0),
0, collapsed_beta0, axis=0)
t_finish = time.time()
logger.info(
'total time of fitting: {} seconds'.format(t_finish - t_start))
return np.dot(L, L.T), L, s_post, \
beta_post, beta0_post, sigma_post, \
rho_post, X0, LL
def _fit_RSA_marginalized_null(self, Y, X_base,
scan_onsets):
""" The marginalized version of the null model for Bayesian RSA.
The null model assumes no task-related response to the
design matrix.
Note that there is a naming change of variable. X in fit()
is changed to Y here.
This is because we follow the tradition that Y corresponds
to data.
However, in wrapper function fit(), we follow the naming
routine of scikit-learn.
"""
# Because there is nothing to learn that is shared across
# participants, we can run each subject in serial.
# The only fitting required is to re-estimate X0 after
# each iteration
n_subj = len(Y)
t_start = time.time()
logger.info('Starting to fit the model. Maximum iteration: '
'{}.'.format(self.n_iter))
rho_grids, rho_weights = self._set_rho_grids()
logger.info('The grids of rho used to do numerical integration '
'is {}.'.format(rho_grids))
n_grid = self.rho_bins
log_weights = np.log(rho_weights)
rho_post = [None] * n_subj
sigma_post = [None] * n_subj
beta0_post = [None] * n_subj
X0 = [None] * n_subj
LL_null = np.zeros(n_subj)
for subj in range(n_subj):
logger.debug('Running on subject {}.'.format(subj))
[n_T, n_V] = np.shape(Y[subj])
D, F, run_TRs, n_run = self._prepare_DF(
n_T, scan_onsets=scan_onsets[subj])
YTY_diag = np.sum(Y[subj] * Y[subj], axis=0)
YTDY_diag = np.sum(Y[subj] * np.dot(D, Y[subj]), axis=0)
YTFY_diag = np.sum(Y[subj] * np.dot(F, Y[subj]), axis=0)
# Add DC components capturing run-specific baselines.
X_DC = self._gen_X_DC(run_TRs)
X_DC, X_base[subj], idx_DC = self._merge_DC_to_base(
X_DC, X_base[subj], no_DC=False)
X_res = np.empty((n_T, 0))
for it in range(0, self.n_iter):
X0[subj] = np.concatenate(
(X_base[subj], X_res), axis=1)
n_X0 = X0[subj].shape[1]
X0TX0, X0TDX0, X0TFX0 = self._make_templates(
D, F, X0[subj], X0[subj])
X0TY, X0TDY, X0TFY = self._make_templates(
D, F, X0[subj], Y[subj])
YTAY_diag = YTY_diag - rho_grids[:, None] * YTDY_diag \
+ rho_grids[:, None]**2 * YTFY_diag
# dimension: #rho*space,
# A/sigma2 is the inverse of noise covariance matrix.
# YTAY means Y'AY
X0TAX0 = X0TX0[None, :, :] \
- rho_grids[:, None, None] \
* X0TDX0[None, :, :] \
+ rho_grids[:, None, None]**2 \
* X0TFX0[None, :, :]
# dimension: #rho*#baseline*#baseline
X0TAY = X0TY - rho_grids[:, None, None] * X0TDY \
+ rho_grids[:, None, None]**2 * X0TFY
# dimension: #rho*#baseline*space
X0TAX0_i = np.linalg.solve(
X0TAX0, np.identity(n_X0)[None, :, :])
# dimension: #rho*#baseline*#baseline
YTAcorrY_diag = np.empty(np.shape(YTAY_diag))
for i_r in range(np.size(rho_grids)):
YTAcorrY_diag[i_r, :] = YTAY_diag[i_r, :] \
- np.sum(X0TAY[i_r, :, :] * np.dot(
X0TAX0_i[i_r, :, :], X0TAY[i_r, :, :]),
axis=0)
log_fixed_terms = - (n_T - n_X0) / 2 * np.log(2 * np.pi)\
+ n_run / 2 * np.log(1 - rho_grids**2) \
+ scipy.special.gammaln((n_T - n_X0 - 2) / 2) \
+ (n_T - n_X0 - 2) / 2 * np.log(2)
# These are terms in the log likelihood that do not
# depend on L. Notice that the last term comes from
# the term of marginalizing sigma. We take the 2 in
# the denominator out. Accordingly, the "denominator"
# variable in the _raw_loglike_grids() function is not
# divided by 2
half_log_det_X0TAX0 = self._half_log_det(X0TAX0)
LL_raw = -half_log_det_X0TAX0[:, None] \
- (n_T - n_X0 - 2) / 2 * np.log(YTAcorrY_diag) \
+ log_weights[:, None] + log_fixed_terms[:, None]
# dimension: n_grid * space
# The log likelihood at each pair of values of rho1.
# half_log_det_X0TAX0 is 0.5*log(det(X0TAX0)) with the size of
# number of parameter grids. So is the size of log_weights
result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw)
weight_post = result_exp / result_sum
rho_post[subj] = np.sum(rho_grids[:, None] * weight_post,
axis=0)
# Mean-posterior estimate of rho.
sigma_means = YTAcorrY_diag ** 0.5 \
* (np.exp(scipy.special.gammaln((n_T - n_X0 - 3) / 2)
- scipy.special.gammaln((n_T - n_X0 - 2) / 2))
/ 2**0.5)
sigma_post[subj] = np.sum(sigma_means * weight_post, axis=0)
beta0_post[subj] = np.zeros((n_X0, n_V))
for grid in range(n_grid):
beta0_post[subj] += weight_post[grid, :] * np.dot(
X0TAX0_i[grid, :, :], X0TAY[grid, :, :])
if self.auto_nuisance:
residuals = Y[subj] - np.dot(
X_base[subj],
beta0_post[subj][:np.size(X_base[subj], 1), :])
X_res_new = self.nureg_method(
self.n_nureg_[subj]).fit_transform(
self.preprocess_residual(residuals))
if it >= 1:
if np.max(np.abs(X_res_new - X_res)) <= self.tol:
logger.info('The change of X_res is '
'smaller than the tolerance value {}.'
'Fitting is finished after {} '
'iterations'.format(self.tol, it + 1))
break
X_res = X_res_new
if idx_DC.size > 1:
collapsed_DC = np.sum(X0[subj][:, idx_DC], axis=1)
X0[subj] = np.insert(np.delete(X0[subj], idx_DC, axis=1), 0,
collapsed_DC, axis=1)
collapsed_beta0 = np.mean(beta0_post[subj][idx_DC, :], axis=0)
beta0_post[subj] = np.insert(
np.delete(beta0_post[subj], idx_DC, axis=0),
0, collapsed_beta0, axis=0)
LL_null[subj] = np.sum(np.log(result_sum) + max_value)
t_finish = time.time()
logger.info(
'total time of fitting: {} seconds'.format(t_finish - t_start))
return beta0_post, sigma_post, rho_post, X0, LL_null
def _raw_loglike_grids(self, L, s2XTAcorrX, YTAcorrY_diag,
sXTAcorrY, half_log_det_X0TAX0,
log_weights, log_fixed_terms,
n_C, n_T, n_V, n_X0,
n_grid, rank):
# LAMBDA_i = np.dot(np.einsum('ijk,jl->ilk', s2XTAcorrX, L), L) \
# + np.identity(rank)
LAMBDA_i = np.empty((n_grid, rank, rank))
for grid in np.arange(n_grid):
LAMBDA_i[grid, :, :] = np.dot(np.dot(L.T,
s2XTAcorrX[grid, :, :]), L)
LAMBDA_i += np.identity(rank)
# dimension: n_grid * rank * rank
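# Interpretation note: for each (SNR, rho) grid point,
# LAMBDA_i = I_rank + s**2 * L' X'AcorrX L. Its Cholesky factor supplies
# the 0.5*log-determinant term of the likelihood below, and
# L_LAMBDA_LT = L LAMBDA L' is later used (weighted by SNR and the
# posterior grid weights) to form the posterior mean of beta.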
Chol_LAMBDA_i = np.linalg.cholesky(LAMBDA_i)
# dimension: n_grid * rank * rank
half_log_det_LAMBDA_i = np.sum(
np.log(np.abs(np.diagonal(Chol_LAMBDA_i, axis1=1, axis2=2))),
axis=1)
# dimension: n_grid
L_LAMBDA = np.empty((n_grid, n_C, rank))
L_LAMBDA_LT = np.empty((n_grid, n_C, n_C))
s2YTAcorrXL_LAMBDA_LTXTAcorrY = np.empty((n_grid, n_V))
# dimension: space * n_grid
for grid in np.arange(n_grid):
L_LAMBDA[grid, :, :] = scipy.linalg.cho_solve(
(Chol_LAMBDA_i[grid, :, :], True), L.T).T
L_LAMBDA_LT[grid, :, :] = np.dot(L_LAMBDA[grid, :, :], L.T)
s2YTAcorrXL_LAMBDA_LTXTAcorrY[grid, :] = np.sum(
sXTAcorrY[grid, :, :] * np.dot(L_LAMBDA_LT[grid, :, :],
sXTAcorrY[grid, :, :]),
axis=0)
denominator = (YTAcorrY_diag - s2YTAcorrXL_LAMBDA_LTXTAcorrY)
# dimension: n_grid * space
# Not necessary the best name for it. But this term appears
# as the denominator within the gradient wrt L
# In the equation of the log likelihood, this "denominator"
# term is in fact divided by 2. But we absorb that into the
# log fixed terms.
LL_raw = -half_log_det_X0TAX0[:, None] \
- half_log_det_LAMBDA_i[:, None] \
- (n_T - n_X0 - 2) / 2 * np.log(denominator) \
+ log_weights[:, None] + log_fixed_terms[:, None]
# dimension: n_grid * space
# The log likelihood at each pair of values of SNR and rho1.
# half_log_det_X0TAX0 is 0.5*log(det(X0TAX0)) with the size of
# number of parameter grids. So is the size of log_weights
return LL_raw, denominator, L_LAMBDA, L_LAMBDA_LT
def _sum_loglike_marginalized(self, L_vec, s2XTAcorrX, YTAcorrY_diag,
sXTAcorrY, half_log_det_X0TAX0,
log_weights, log_fixed_terms,
l_idx, n_C, n_T, n_V, n_X0,
n_grid, rank=None):
sum_LL_total = 0
sum_grad_L = np.zeros(np.size(l_idx[0]))
for subj in range(len(YTAcorrY_diag)):
LL_total, grad_L = self._loglike_marginalized(
L_vec, s2XTAcorrX[subj], YTAcorrY_diag[subj],
sXTAcorrY[subj], half_log_det_X0TAX0[subj], log_weights,
log_fixed_terms[subj], l_idx, n_C, n_T[subj],
n_V[subj], n_X0[subj], n_grid, rank)
sum_LL_total += LL_total
sum_grad_L += grad_L
return sum_LL_total, sum_grad_L
def _loglike_marginalized(self, L_vec, s2XTAcorrX, YTAcorrY_diag,
sXTAcorrY, half_log_det_X0TAX0,
log_weights, log_fixed_terms,
l_idx, n_C, n_T, n_V, n_X0,
n_grid, rank=None):
# In this version, we assume that beta is independent
# between voxels and noise is also independent. X0 captures the
# co-fluctuation between voxels that is
# not captured by design matrix X.
# The marginalized version marginalizes sigma^2, s and rho1
# for all voxels. n_grid is the number of grid points on which the numerical
# integration is performed to marginalize s and rho1 for each voxel.
# The log likelihood is an inverse-Gamma distribution sigma^2,
# so we can analytically marginalize it assuming uniform prior.
# n_grid is the number of grid in the parameter space of (s, rho1)
# that is used for numerical integration over (s, rho1).
n_l = np.size(l_idx[0])
# the number of parameters in the index of lower-triangular matrix
if rank is None:
rank = int((2 * n_C + 1
- np.sqrt(n_C**2 * 4 + n_C * 4 + 1 - 8 * n_l)) / 2)
L = np.zeros([n_C, rank])
L[l_idx] = L_vec
LL_raw, denominator, L_LAMBDA, _ = self._raw_loglike_grids(
L, s2XTAcorrX, YTAcorrY_diag, sXTAcorrY, half_log_det_X0TAX0,
log_weights, log_fixed_terms, n_C, n_T, n_V, n_X0, n_grid, rank)
result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw)
LL_total = np.sum(np.log(result_sum) + max_value)
# Now we start the gradient with respect to L
# s2XTAcorrXL_LAMBDA = np.einsum('ijk,ikl->ijl',
# s2XTAcorrX, L_LAMBDA)
s2XTAcorrXL_LAMBDA = np.empty((n_grid, n_C, rank))
for grid in range(n_grid):
s2XTAcorrXL_LAMBDA[grid, :, :] = np.dot(s2XTAcorrX[grid, :, :],
L_LAMBDA[grid, :, :])
# dimension: n_grid * condition * rank
I_minus_s2XTAcorrXL_LAMBDA_LT = np.identity(n_C) \
- np.dot(s2XTAcorrXL_LAMBDA, L.T)
# dimension: n_grid * condition * condition
# The step above may be calculated by einsum. Not sure
# which is faster.
weight_grad = result_exp / result_sum
weight_grad_over_denominator = weight_grad / denominator
# dimension: n_grid * space
weighted_sXTAcorrY = sXTAcorrY \
* weight_grad_over_denominator[:, None, :]
# dimension: n_grid * condition * space
# sYTAcorrXL_LAMBDA = np.einsum('ijk,ijl->ikl', sXTAcorrY, L_LAMBDA)
# dimension: n_grid * space * rank
grad_L = np.zeros([n_C, rank])
for grid in range(n_grid):
grad_L += np.dot(
np.dot(I_minus_s2XTAcorrXL_LAMBDA_LT[grid, :, :],
sXTAcorrY[grid, :, :]),
np.dot(weighted_sXTAcorrY[grid, :, :].T,
L_LAMBDA[grid, :, :])) * (n_T - n_X0 - 2)
grad_L -= np.sum(s2XTAcorrXL_LAMBDA
* np.sum(weight_grad, axis=1)[:, None, None],
axis=0)
# dimension: condition * rank
return -LL_total, -grad_L[l_idx]
def _check_data_GBRSA(self, X, for_fit=True):
# Check input data
if type(X) is np.ndarray:
X = [X]
assert type(X) is list, 'Input data X must be either a list '\
'with each entry for one participant, or a numpy array '\
'for a single participant.'
if for_fit:
for i, x in enumerate(X):
assert_all_finite(x)
assert x.ndim == 2, "Each participant's data should be a " \
"2-dimensional ndarray"
assert np.all(np.std(x, axis=0) > 0),\
'The time courses of some voxels in participant {} '\
'do not change at all. Please make sure all voxels '\
'are within the brain'.format(i)
else:
for i, x in enumerate(X):
if x is not None:
assert x.ndim == 2, "Each participant's data should be a " \
"2-dimensional ndarray"
assert x.shape[1] == self.n_V_[i], 'Number of voxels '\
'does not match that in the data used for fitting: '\
'subject {}'.format(i)
# This program allows fitting a single subject. But to have a consistent
# data structure, we make sure X and design are both lists.
return X
def _check_design_GBRSA(self, design, X):
# check design matrix
if type(design) is np.ndarray:
design = [design] * len(X)
if len(X) > 1:
logger.warning('There are multiple subjects while '
'there is only one design matrix. '
'I assume that the design matrix '
'is shared across all subjects.')
assert type(design) is list, 'design matrix must be either a list '\
'with each entry for one participant, or a numpy array '\
'for a single participant.'
for i, d in enumerate(design):
if X[i] is not None:
assert_all_finite(d)
assert d.ndim == 2,\
'The design matrix should be 2 dimension ndarray'
assert np.linalg.matrix_rank(d) == d.shape[1], \
'Your design matrix of subject {} has rank ' \
'smaller than the number of columns. Some columns '\
'can be explained by linear combination of other columns.'\
'Please check your design matrix.'.format(i)
assert np.size(d, axis=0) == np.size(X[i], axis=0),\
'Design matrix and data of subject {} do not '\
'have the same number of time points.'.format(i)
assert self.rank is None or self.rank <= d.shape[1],\
'Your design matrix of subject {} '\
'has fewer columns than the rank you set'.format(i)
if i == 0:
n_C = np.shape(d)[1]
else:
assert n_C == np.shape(d)[1], \
'In Group Bayesian RSA, all subjects should have'\
' the same set of experiment conditions, thus'\
' the same number of columns in design matrix'
if X[i].shape[1] <= d.shape[1]:
logger.warning('Your data have fewer voxels than the '
'number of task conditions. This might '
'cause problem in fitting. Please consider '
'increasing the size of your ROI, or set '
'the rank parameter to a lower number to '
'estimate a low-rank representational '
'structure.')
return design
def _check_nuisance_GBRSA(self, nuisance, X):
# Check the nuisance regressors.
if nuisance is not None:
if type(nuisance) is np.ndarray:
nuisance = [nuisance] * len(X)
if len(X) > 1:
logger.warning('ATTENTION! There are multiple subjects '
'while there is only one nuisance matrix. '
'I assume that the nuisance matrix '
'is shared across all subjects. '
'Please double check.')
assert type(nuisance) is list, \
'nuisance matrix must be either a list '\
'with each entry for one participant, or a numpy array '\
'for a single participant.'
for i, n in enumerate(nuisance):
if n is not None:
assert_all_finite(n)
assert n.ndim == 2,\
'The nuisance regressor should be '\
'2 dimension ndarray or None'
assert np.linalg.matrix_rank(n) == n.shape[1], \
'The nuisance regressor of subject {} has rank '\
'smaller than the number of columns.'\
'Some columns can be explained by linear '\
'combination of other columns. Please check your' \
' nuisance regressors.'.format(i)
assert np.size(n, axis=0) == np.size(X[i], axis=0), \
'Nuisance regressor and data do not have the same '\
'number of time points.'
else:
nuisance = [None] * len(X)
logger.info('None was provided for nuisance matrix. Replicating '
'it for all subjects.')
return nuisance
def _check_scan_onsets_GBRSA(self, scan_onsets, X):
# check scan_onsets validity
if scan_onsets is None or type(scan_onsets) is np.ndarray:
if scan_onsets is None:
scan_onsets = np.array([0], dtype=int)
scan_onsets = [scan_onsets] * len(X)
if len(X) > 1:
logger.warning('There are multiple subjects while '
'there is only one set of scan_onsets. '
'I assume that it is the same for all'
' subjects. Please double check')
for i in np.arange(len(scan_onsets)):
if X[i] is not None:
if scan_onsets[i] is None:
scan_onsets[i] = np.array([0], dtype=int)
logger.warning('No scan onsets were provided for subject'
' {}. Treating all data of this subject as'
' coming from the same run.'.format(i))
else:
scan_onsets[i] = np.int32(scan_onsets[i])
assert (np.max(scan_onsets[i]) <= X[i].shape[0]
and np.min(scan_onsets[i]) >= 0
and 0 in scan_onsets[i]
and scan_onsets[i].ndim == 1), \
'Scan onsets of subject {} has formatting ' \
'issues: {}'.format(i, scan_onsets[i])
return scan_onsets
def _bin_exp(self, n_bin, scale=1.0):
""" Calculate the bin locations to approximate exponential distribution.
It breaks the cumulative probability of exponential distribution
into n_bin equal bins, each covering 1 / n_bin probability. Then it
calculates the center of mass in each bin and returns the
centers of mass. So, it approximates the exponential distribution
with n_bin Delta functions, each weighted by 1 / n_bin, at the
locations of these centers of mass.
Parameters:
-----------
n_bin: int
The number of bins to approximate the exponential distribution
scale: float.
The scale parameter of the exponential distribution, defined in
the same way as scipy.stats. It does not influence the ratios
between the bins, but just controls the spacing between the bins.
So generally users should not change its default.
Returns:
--------
bins: numpy array of size [n_bin,]
The centers of mass for each segment of the
exponential distribution.
"""
boundaries = np.flip(scipy.stats.expon.isf(
np.linspace(0, 1, n_bin + 1),
scale=scale), axis=0)
bins = np.empty(n_bin)
for i in np.arange(n_bin):
bins[i] = utils.center_mass_exp(
(boundaries[i], boundaries[i + 1]), scale=scale)
return bins
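# Quick sanity check for _bin_exp (comments only, not executed): with
# n_bin=2 and scale=1, Exp(1) is split at its median log(2) ~= 0.693; the
# centers of mass of the two halves are roughly 0.31 and 1.69, and their
# average recovers the distribution mean of 1.0.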
def _set_SNR_grids(self):
""" Set the grids and weights for SNR used in numerical integration
of SNR parameters.
"""
if self.SNR_prior == 'unif':
SNR_grids = np.linspace(0, 1, self.SNR_bins)
SNR_weights = np.ones(self.SNR_bins) / (self.SNR_bins - 1)
SNR_weights[0] = SNR_weights[0] / 2.0
SNR_weights[-1] = SNR_weights[-1] / 2.0
elif self.SNR_prior == 'lognorm':
dist = scipy.stats.lognorm
alphas = np.arange(np.mod(self.SNR_bins, 2),
self.SNR_bins + 2, 2) / self.SNR_bins
# The goal here is to divide the area under the pdf curve
# into segments representing equal probabilities.
bounds = dist.interval(alphas, (self.logS_range,))
bounds = np.unique(bounds)
# bounds contain the boundaries which equally separate
# the probability mass of the distribution
SNR_grids = np.zeros(self.SNR_bins)
for i in np.arange(self.SNR_bins):
SNR_grids[i] = dist.expect(
lambda x: x, args=(self.logS_range,),
lb=bounds[i], ub=bounds[i + 1]) * self.SNR_bins
# Center of mass of each segment between consecutive
# bounds are set as the grids for SNR.
SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
elif self.SNR_prior == 'exp':
SNR_grids = self._bin_exp(self.SNR_bins)
SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
else:
SNR_grids = np.ones(1)
SNR_weights = np.ones(1)
SNR_weights = SNR_weights / np.sum(SNR_weights)
return SNR_grids, SNR_weights
def _set_rho_grids(self):
""" Set the grids and weights for rho used in numerical integration
of AR(1) parameters.
"""
rho_grids = np.arange(self.rho_bins) * 2 / self.rho_bins - 1 \
+ 1 / self.rho_bins
rho_weights = np.ones(self.rho_bins) / self.rho_bins
return rho_grids, rho_weights
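# For example (not executed): with the default rho_bins=20 this yields the
# grid -0.95, -0.85, ..., 0.85, 0.95 with a uniform weight of 0.05 per bin,
# matching the description in the class docstring.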
def _matrix_flattened_grid(self, X0TAX0, X0TAX0_i, SNR_grids, XTAcorrX,
YTAcorrY_diag, XTAcorrY, X0TAY, XTAX0,
n_C, n_V, n_X0, n_grid):
""" We need to integrate parameters SNR and rho on 2-d discrete grids.
This function generates matrices which have only one dimension for
these two parameters, with each slice in that dimension
corresponding to each combination of the discrete grids of SNR
and discrete grids of rho.
"""
half_log_det_X0TAX0 = np.reshape(
np.repeat(self._half_log_det(X0TAX0)[None, :],
self.SNR_bins, axis=0), n_grid)
X0TAX0 = np.reshape(
np.repeat(X0TAX0[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_X0))
X0TAX0_i = np.reshape(np.repeat(
X0TAX0_i[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_X0))
s2XTAcorrX = np.reshape(
SNR_grids[:, None, None, None]**2 * XTAcorrX,
(n_grid, n_C, n_C))
YTAcorrY_diag = np.reshape(np.repeat(
YTAcorrY_diag[None, :, :],
self.SNR_bins, axis=0), (n_grid, n_V))
sXTAcorrY = np.reshape(SNR_grids[:, None, None, None]
* XTAcorrY, (n_grid, n_C, n_V))
X0TAY = np.reshape(np.repeat(X0TAY[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_V))
XTAX0 = np.reshape(np.repeat(XTAX0[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_C, n_X0))
return half_log_det_X0TAX0, X0TAX0, X0TAX0_i, s2XTAcorrX, \
YTAcorrY_diag, sXTAcorrY, X0TAY, XTAX0
| apache-2.0 |
swails/mdtraj | mdtraj/core/topology.py | 1 | 48729 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Peter Eastman, Robert McGibbon
# Contributors: Kyle A. Beauchamp, Matthew Harrigan, Carlos Xavier Hernandez
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import itertools
import numpy as np
import os
import xml.etree.ElementTree as etree
from mdtraj.core import element as elem
from mdtraj.core.residue_names import (_PROTEIN_RESIDUES, _WATER_RESIDUES,
_AMINO_ACID_CODES)
from mdtraj.core.selection import parse_selection
from mdtraj.utils import ilen, import_, ensure_type
from mdtraj.utils.six import string_types
##############################################################################
# Utilities
##############################################################################
def _topology_from_subset(topology, atom_indices):
"""Create a new topology that only contains the supplied indices
Note
----
This really should be a copy constructor (class method) on Topology.
    It used to work on OpenMM topologies as well, but the implementations
    have since diverged, so that no longer works.
Parameters
----------
topology : mdtraj.Topology
The base topology
atom_indices : array_like, dtype=int
The indices of the atoms to keep
"""
newTopology = Topology()
old_atom_to_new_atom = {}
for chain in topology._chains:
newChain = newTopology.add_chain()
for residue in chain._residues:
resSeq = getattr(residue, 'resSeq', None) or residue.index
newResidue = newTopology.add_residue(residue.name, newChain,
resSeq, residue.segment_id)
for atom in residue._atoms:
if atom.index in atom_indices:
try: # OpenMM Topology objects don't have serial attributes, so we have to check first.
serial = atom.serial
except AttributeError:
serial = None
newAtom = newTopology.add_atom(atom.name, atom.element,
newResidue, serial=serial)
old_atom_to_new_atom[atom] = newAtom
bondsiter = topology.bonds
if not hasattr(bondsiter, '__iter__'):
bondsiter = bondsiter()
for atom1, atom2 in bondsiter:
try:
newTopology.add_bond(old_atom_to_new_atom[atom1],
old_atom_to_new_atom[atom2])
except KeyError:
pass
# we only put bonds into the new topology if both of their partners
# were indexed and thus HAVE a new atom
# Delete empty residues
newTopology._residues = [r for r in newTopology._residues if len(r._atoms) > 0]
for chain in newTopology._chains:
chain._residues = [r for r in chain._residues if len(r._atoms) > 0]
# Delete empty chains
newTopology._chains = [c for c in newTopology._chains
if len(c._residues) > 0]
# Re-set the numAtoms and numResidues
newTopology._numAtoms = ilen(newTopology.atoms)
newTopology._numResidues = ilen(newTopology.residues)
return newTopology
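# Editor's note: an illustrative sketch, not part of the original module. It
# shows how this helper is normally reached (via Topology.subset below) and
# its key property: a bond survives only if both partner atoms were retained,
# and emptied residues and chains are pruned. The 'CA' atom name is just an
# example selection.
def _demo_topology_from_subset(topology):
    alpha_carbon_indices = [a.index for a in topology.atoms if a.name == 'CA']
    sub = _topology_from_subset(topology, alpha_carbon_indices)
    # Every residue and chain left in the subset still contains atoms.
    assert all(r.n_atoms > 0 for r in sub.residues)
    assert all(c.n_residues > 0 for c in sub.chains)
    return sub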
##############################################################################
# Classes
##############################################################################
class Topology(object):
"""Topology stores the topological information about a system.
The structure of a Topology object is similar to that of a PDB file.
It consists of a set of Chains (often but not always corresponding to
polymer chains). Each Chain contains a set of Residues, and each Residue
contains a set of Atoms. In addition, the Topology stores a list of which
atom pairs are bonded to each other.
Atom and residue names should follow the PDB 3.0 nomenclature for all
molecules for which one exists.
Attributes
----------
chains : generator
Iterator over all Chains in the Topology.
residues : generator
Iterator over all Residues in the Chain.
atoms : generator
Iterator over all Atoms in the Chain.
Examples
--------
>>> topology = md.load('example.pdb').topology
>>> print(topology)
<mdtraj.Topology with 1 chains, 3 residues, 22 atoms, 21 bonds at 0x105a98e90>
>>> table, bonds = topology.to_dataframe()
>>> print(table.head())
serial name element resSeq resName chainID
0 0 H1 H 1 CYS 0
1 1 CH3 C 1 CYS 0
2 2 H2 H 1 CYS 0
3 3 H3 H 1 CYS 0
4 4 C C 1 CYS 0
>>> # rename residue "CYS" to "CYSS"
    >>> table.loc[table['resName'] == 'CYS', 'resName'] = 'CYSS'
>>> print(table.head())
serial name element resSeq resName chainID
0 0 H1 H 1 CYSS 0
1 1 CH3 C 1 CYSS 0
2 2 H2 H 1 CYSS 0
3 3 H3 H 1 CYSS 0
4 4 C C 1 CYSS 0
>>> t2 = md.Topology.from_dataframe(table, bonds)
"""
_standardBonds = {}
def __init__(self):
"""Create a new Topology object"""
self._chains = []
self._numResidues = 0
self._numAtoms = 0
self._bonds = []
self._atoms = []
self._residues = []
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%s>" % (self._string_summary_basic())
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
def _string_summary_basic(self):
return ("mdtraj.Topology with %d chains, %d residues, "
"%d atoms, %d bonds" % (self.n_chains, self.n_residues,
self.n_atoms, len(self._bonds)))
def copy(self):
"""Return a copy of the topology
Returns
-------
out : Topology
A copy of this topology
"""
out = Topology()
for chain in self.chains:
c = out.add_chain()
for residue in chain.residues:
r = out.add_residue(residue.name, c, residue.resSeq, residue.segment_id)
for atom in residue.atoms:
out.add_atom(atom.name, atom.element, r,
serial=atom.serial)
for a1, a2 in self.bonds:
out.add_bond(a1, a2)
return out
def __copy__(self, *args):
return self.copy()
def __deepcopy__(self, *args):
return self.copy()
def __hash__(self):
hash_value = hash(tuple(self._chains))
hash_value ^= hash(tuple(self._atoms))
hash_value ^= hash(tuple(self._bonds))
hash_value ^= hash(tuple(self._residues))
return hash_value
def join(self, other):
"""Join two topologies together
Parameters
----------
other : Topology
Another topology object
Returns
-------
out : Topology
A joint topology, with all of the atoms/residues/chains/bonds
in each of the individual topologies
"""
if not isinstance(other, Topology):
raise ValueError('other must be an instance of Topology to join')
out = self.copy()
atom_mapping = {}
for chain in other.chains:
c = out.add_chain()
for residue in chain.residues:
r = out.add_residue(residue.name, c, residue.resSeq, residue.segment_id)
for atom in residue.atoms:
a = out.add_atom(atom.name, atom.element, r,
serial=atom.serial)
atom_mapping[atom] = a
for a1, a2 in other.bonds:
out.add_bond(atom_mapping[a1], atom_mapping[a2])
return out
def to_fasta(self, chain=None):
"""Convert this topology into FASTA string
Parameters
----------
chain : Integer, optional, default=None
If specified, will return the FASTA string for this chain in the
Topology.
Returns
-------
fasta : String or list of Strings
A FASTA string for each chain specified.
"""
fasta = lambda c: "".join([res.code for res in c.residues
if res.is_protein and res.code is not None])
if chain is not None:
if not isinstance(chain, int):
raise ValueError('chain must be an Integer.')
return fasta(self._chains[chain])
else:
return [fasta(c) for c in self._chains]
def to_openmm(self, traj=None):
"""Convert this topology into OpenMM topology
Parameters
----------
traj : MDTraj.Trajectory, optional, default=None
If specified, use the first frame from this trajectory to
set the unitcell information in the openmm topology.
Returns
-------
topology : simtk.openmm.app.Topology
This topology, as an OpenMM topology
"""
app = import_('simtk.openmm.app')
mm = import_('simtk.openmm')
u = import_('simtk.unit')
out = app.Topology()
atom_mapping = {}
for chain in self.chains:
c = out.addChain()
for residue in chain.residues:
r = out.addResidue(residue.name, c)
for atom in residue.atoms:
if atom.element is elem.virtual:
element = None
else:
element = app.Element.getBySymbol(atom.element.symbol)
a = out.addAtom(atom.name, element, r)
atom_mapping[atom] = a
for a1, a2 in self.bonds:
out.addBond(atom_mapping[a1], atom_mapping[a2])
if traj is not None:
angles = traj.unitcell_angles[0]
if np.linalg.norm(angles - 90.0) > 1E-4:
raise(ValueError("Unitcell angles must be 90.0 to use "
"in OpenMM topology."))
box_vectors = mm.Vec3(*traj.unitcell_lengths[0]) * u.nanometer
out.setUnitCellDimensions(box_vectors)
return out
@classmethod
def from_openmm(cls, value):
"""Create a mdtraj topology from an OpenMM topology
Parameters
----------
value : simtk.openmm.app.Topology
An OpenMM topology that you wish to convert to a
mdtraj topology.
"""
app = import_('simtk.openmm.app')
if not isinstance(value, app.Topology):
raise TypeError('value must be an OpenMM Topology. '
'You supplied a %s' % type(value))
out = cls()
atom_mapping = {}
for chain in value.chains():
c = out.add_chain()
for residue in chain.residues():
r = out.add_residue(residue.name, c)
for atom in residue.atoms():
if atom.element is None:
element = elem.virtual
else:
element = elem.get_by_symbol(atom.element.symbol)
a = out.add_atom(atom.name, element, r)
atom_mapping[atom] = a
for a1, a2 in value.bonds():
out.add_bond(atom_mapping[a1], atom_mapping[a2])
return out
def to_dataframe(self):
"""Convert this topology into a pandas dataframe
Returns
-------
atoms : pandas.DataFrame
The atoms in the topology, represented as a data frame.
bonds : np.ndarray
The bonds in this topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond.
"""
pd = import_('pandas')
data = [(atom.serial, atom.name, atom.element.symbol,
atom.residue.resSeq, atom.residue.name,
atom.residue.chain.index,atom.segment_id) for atom in self.atoms]
atoms = pd.DataFrame(data, columns=["serial", "name", "element",
"resSeq", "resName", "chainID","segmentID"])
bonds = np.array([(a.index, b.index) for (a, b) in self.bonds])
return atoms, bonds
@classmethod
def from_dataframe(cls, atoms, bonds=None):
"""Create a mdtraj topology from a pandas data frame
Parameters
----------
atoms : pandas.DataFrame
The atoms in the topology, represented as a data frame. This data
frame should have columns "serial" (atom index), "name" (atom name),
"element" (atom's element), "resSeq" (index of the residue)
"resName" (name of the residue), "chainID" (index of the chain),
and optionally "segmentID", following the same conventions
as wwPDB 3.0 format.
bonds : np.ndarray, shape=(n_bonds, 2), dtype=int, optional
The bonds in the topology, represented as an n_bonds x 2 array
            of the indices of the atoms involved in each bond. Specifying
bonds here is optional. To create standard protein bonds, you can
use `create_standard_bonds` to "fill in" the bonds on your newly
created Topology object
See Also
--------
create_standard_bonds
"""
pd = import_('pandas')
if bonds is None:
bonds = np.zeros((0, 2))
for col in ["name", "element", "resSeq",
"resName", "chainID", "serial"]:
if col not in atoms.columns:
raise ValueError('dataframe must have column %s' % col)
if "segmentID" not in atoms.columns:
atoms["segmentID"] = ""
out = cls()
if not isinstance(atoms, pd.DataFrame):
raise TypeError('atoms must be an instance of pandas.DataFrame. '
'You supplied a %s' % type(atoms))
if not isinstance(bonds, np.ndarray):
raise TypeError('bonds must be an instance of numpy.ndarray. '
'You supplied a %s' % type(bonds))
if not np.all(np.arange(len(atoms)) == atoms.index):
raise ValueError('atoms must be uniquely numbered '
'starting from zero.')
out._atoms = [None for i in range(len(atoms))]
for ci in np.unique(atoms['chainID']):
chain_atoms = atoms[atoms['chainID'] == ci]
c = out.add_chain()
for ri in np.unique(chain_atoms['resSeq']):
residue_atoms = chain_atoms[chain_atoms['resSeq'] == ri]
rnames = residue_atoms['resName']
residue_name = np.array(rnames)[0]
segids = residue_atoms['segmentID']
segment_id = np.array(segids)[0]
if not np.all(rnames == residue_name):
raise ValueError('All of the atoms with residue index %d '
'do not share the same residue name' % ri)
r = out.add_residue(residue_name, c, ri,segment_id)
for atom_index, atom in residue_atoms.iterrows():
atom_index = int(atom_index) # Fixes bizarre hashing issue on Py3K. See #545
a = Atom(atom['name'], elem.get_by_symbol(atom['element']),
atom_index, r, serial=atom['serial'])
out._atoms[atom_index] = a
r._atoms.append(a)
for ai1, ai2 in bonds:
out.add_bond(out.atom(ai1), out.atom(ai2))
out._numAtoms = out.n_atoms
return out
def to_bondgraph(self):
"""Create a NetworkX graph from the atoms and bonds in this topology
Returns
-------
g : nx.Graph
A graph whose nodes are the Atoms in this topology, and
whose edges are the bonds
See Also
--------
atoms
bonds
Notes
-----
This method requires the NetworkX python package.
"""
nx = import_('networkx')
g = nx.Graph()
g.add_nodes_from(self.atoms)
g.add_edges_from(self.bonds)
return g
def __eq__(self, other):
"""Are two topologies equal?
Parameters
----------
other : object
The object to compare to
Returns
-------
equality : bool
Are the two topologies identical?
"""
if not isinstance(other, Topology):
return False
if self is other:
return True
if len(self._chains) != len(other._chains):
return False
for c1, c2 in zip(self.chains, other.chains):
if c1.index != c2.index:
return False
if len(c1._residues) != len(c2._residues):
return False
for r1, r2 in zip(c1.residues, c2.residues):
                if (r1.index != r2.index) or (r1.name != r2.name): # or (r1.resSeq != r2.resSeq):
return False
if len(r1._atoms) != len(r2._atoms):
return False
for a1, a2 in zip(r1.atoms, r2.atoms):
if (a1.index != a2.index) or (a1.name != a2.name):
return False
if a1.element is not a2.element:
return False
# for attr in ['atomic_number', 'name', 'symbol']:
# if getattr(a1.element, attr) != getattr(a2.element, attr):
# return False
if len(self._bonds) != len(other._bonds):
return False
# the bond ordering is somewhat ambiguous, so try and fix it for comparison
self_sorted_bonds = sorted([(a1.index, b1.index)
for (a1, b1) in self.bonds])
other_sorted_bonds = sorted([(a2.index, b2.index)
for (a2, b2) in other.bonds])
for i in range(len(self._bonds)):
(a1, b1) = self_sorted_bonds[i]
(a2, b2) = other_sorted_bonds[i]
if (a1 != a2) or (b1 != b2):
return False
return True
def add_chain(self):
"""Create a new Chain and add it to the Topology.
Returns
-------
chain : mdtraj.topology.Chain
the newly created Chain
"""
chain = Chain(len(self._chains), self)
self._chains.append(chain)
return chain
def add_residue(self, name, chain, resSeq=None, segment_id=""):
"""Create a new Residue and add it to the Topology.
Parameters
----------
name : str
The name of the residue to add
chain : mdtraj.topology.Chain
The Chain to add it to
resSeq : int, optional
Residue sequence number, such as from a PDB record. These sequence
numbers are arbitrary, and do not necessarily start at 0 (or 1).
If not supplied, the resSeq attribute will be set to the
residue's sequential (0 based) index.
segment_id : str, optional
A label for the segment to which this residue belongs
Returns
-------
residue : mdtraj.topology.Residue
The newly created Residue
"""
if resSeq is None:
resSeq = self._numResidues
residue = Residue(name, self._numResidues, chain, resSeq, segment_id)
self._residues.append(residue)
self._numResidues += 1
chain._residues.append(residue)
return residue
def add_atom(self, name, element, residue, serial=None):
"""Create a new Atom and add it to the Topology.
Parameters
----------
name : str
The name of the atom to add
element : mdtraj.element.Element
The element of the atom to add
residue : mdtraj.topology.Residue
The Residue to add it to
serial : int
Serial number associated with the atom.
Returns
-------
atom : mdtraj.topology.Atom
the newly created Atom
"""
if element is None:
element = elem.virtual
atom = Atom(name, element, self._numAtoms, residue, serial=serial)
self._atoms.append(atom)
self._numAtoms += 1
residue._atoms.append(atom)
return atom
def add_bond(self, atom1, atom2):
"""Create a new bond and add it to the Topology.
Parameters
----------
atom1 : mdtraj.topology.Atom
The first Atom connected by the bond
atom2 : mdtraj.topology.Atom
The second Atom connected by the bond
"""
if atom1.index < atom2.index:
self._bonds.append((atom1, atom2))
else:
self._bonds.append((atom2, atom1))
def chain(self, index):
"""Get a specific chain by index. These indices
start from zero.
Parameters
----------
index : int
The index of the chain to select.
Returns
-------
chain : Chain
The `index`-th chain in the topology.
"""
return self._chains[index]
@property
def chains(self):
"""Iterator over all Chains in the Topology.
Returns
-------
chainiter : listiterator
Iterator over all Chains in the Topology.
"""
return iter(self._chains)
@property
def n_chains(self):
"""Get the number of chains in the Topology"""
return len(self._chains)
def residue(self, index):
"""Get a specific residue by index. These indices
start from zero.
Parameters
----------
index : int
The index of the residue to select.
Returns
-------
residue : Residue
The `index`-th residue in the topology.
"""
return self._residues[index]
@property
def residues(self):
"""Iterator over all Residues in the Topology.
Returns
-------
residueiter : generator
Iterator over all Residues in the Topology.
"""
for chain in self._chains:
for residue in chain._residues:
yield residue
@property
def n_residues(self):
"""Get the number of residues in the Topology. """
return len(self._residues)
def atom(self, index):
"""Get a specific atom by index. These indices
start from zero.
Parameters
----------
index : int
The index of the atom to select.
Returns
-------
atom : Atom
The `index`-th atom in the topology.
"""
return self._atoms[index]
@property
def atoms(self):
"""Iterator over all Atoms in the Topology.
Returns
-------
atomiter : generator
Iterator over all Atoms in the Topology.
"""
for chain in self._chains:
for residue in chain._residues:
for atom in residue._atoms:
yield atom
def atoms_by_name(self, name):
"""Iterator over all Atoms in the Topology with a specified name
Parameters
----------
name : str
The particular atom name of interest.
Examples
--------
>>> for atom in topology.atoms_by_name('CA'):
... print(atom)
Returns
-------
atomiter : generator
"""
for atom in self.atoms:
if atom.name == name:
yield atom
@property
def n_atoms(self):
"""Get the number of atoms in the Topology"""
return len(self._atoms)
@property
def bonds(self):
"""Iterator over all bonds (each represented as a tuple of two Atoms) in the Topology.
Returns
-------
atomiter : generator
Iterator over all tuple of Atoms in the Trajectory involved in a bond.
"""
return iter(self._bonds)
@property
def n_bonds(self):
"""Get the number of bonds in the Topology"""
return len(self._bonds)
def create_standard_bonds(self):
"""Create bonds based on the atom and residue names for all standard residue types.
"""
if len(Topology._standardBonds) == 0:
            # Load the standard bond definitions.
tree = etree.parse(os.path.join(os.path.dirname(__file__), '..',
'formats', 'pdb', 'data', 'residues.xml'))
for residue in tree.getroot().findall('Residue'):
bonds = []
Topology._standardBonds[residue.attrib['name']] = bonds
for bond in residue.findall('Bond'):
bonds.append((bond.attrib['from'], bond.attrib['to']))
for chain in self._chains:
# First build a map of atom names to atoms.
atomMaps = []
for residue in chain._residues:
atomMap = {}
atomMaps.append(atomMap)
for atom in residue._atoms:
atomMap[atom.name] = atom
# Loop over residues and construct bonds.
for i in range(len(chain._residues)):
name = chain._residues[i].name
if name in Topology._standardBonds:
for bond in Topology._standardBonds[name]:
if bond[0].startswith('-') and i > 0:
fromResidue = i-1
fromAtom = bond[0][1:]
elif (bond[0].startswith('+')
and i < len(chain._residues)):
fromResidue = i+1
fromAtom = bond[0][1:]
else:
fromResidue = i
fromAtom = bond[0]
if bond[1].startswith('-') and i > 0:
toResidue = i-1
toAtom = bond[1][1:]
elif (bond[1].startswith('+')
and i < len(chain._residues)):
toResidue = i+1
toAtom = bond[1][1:]
else:
toResidue = i
toAtom = bond[1]
if (fromAtom in atomMaps[fromResidue]
and toAtom in atomMaps[toResidue]):
self.add_bond(atomMaps[fromResidue][fromAtom],
atomMaps[toResidue][toAtom])
def create_disulfide_bonds(self, positions):
"""Identify disulfide bonds based on proximity and add them to the Topology.
Parameters
----------
positions : list
The list of atomic positions based on which to identify bonded atoms
"""
def isCyx(res):
names = [atom.name for atom in res._atoms]
return 'SG' in names and 'HG' not in names
cyx = [res for res in self.residues
if res.name == 'CYS' and isCyx(res)]
atomNames = [[atom.name for atom in res._atoms] for res in cyx]
for i in range(len(cyx)):
sg1 = cyx[i]._atoms[atomNames[i].index('SG')]
pos1 = positions[sg1.index]
for j in range(i):
sg2 = cyx[j]._atoms[atomNames[j].index('SG')]
pos2 = positions[sg2.index]
delta = [x-y for (x, y) in zip(pos1, pos2)]
distance = np.sqrt(
delta[0]*delta[0] + delta[1]*delta[1] + delta[2]*delta[2])
if distance < 0.3: # this is supposed to be nm. I think we're good
self.add_bond(sg1, sg2)
def subset(self, atom_indices):
"""Create a new Topology from a subset of the atoms in an existing topology.
Notes
-----
The existing topology will not be altered.
Parameters
----------
atom_indices : array_like
            A list of the indices corresponding to the atoms that you'd
            like to retain.
"""
return _topology_from_subset(self, atom_indices)
def select_expression(self, selection_string):
"""Translate a atom selection expression into a pure python expression.
Parameters
----------
selection_string : str
An expression in the MDTraj atom selection DSL
Examples
--------
        >>> topology.select_expression('name O and water')
        "[atom.index for atom in topology.atoms if ((atom.name == 'O') and atom.residue.is_water)]"
Returns
-------
python_string : str
A string containing a pure python expression, equivalent to the
selection expression.
"""
condition = parse_selection(selection_string).source
fmt_string = "[atom.index for atom in topology.atoms if {condition}]"
return fmt_string.format(condition=condition)
def select(self, selection_string):
"""Execute a selection against the topology
Parameters
----------
selection_string : str
An expression in the MDTraj atom selection DSL
Examples
--------
>>> topology.select('name O and water')
array([1, 3, 5, 10, ...])
Returns
-------
indices : np.ndarray, dtype=int, ndim=1
Array of the indices of the atoms matching the selection expression.
See Also
--------
select_expression, mdtraj.core.selection.parse_selection
"""
filter_func = parse_selection(selection_string).expr
indices = np.array([a.index for a in self.atoms if filter_func(a)])
return indices
def select_atom_indices(self, selection='minimal'):
"""Get the indices of biologically-relevant groups by name.
Parameters
----------
selection : {'all', 'alpha', 'minimal', 'heavy', 'water'}
What types of atoms to select.
``all``
All atoms
``alpha``
Protein residue alpha carbons
``minimal``
Keep the atoms in protein residues with names in {CA, CB, C, N, O}
``heavy``
All non-hydrogen protein atoms.
``water``
Water oxygen atoms
Returns
        -------
indices : np.ndarray (N,)
An array of the indices of the selected atoms.
"""
selection = selection.lower()
options = ['all', 'alpha', 'minimal', 'heavy', 'water']
if selection == 'all':
atom_indices = np.arange(self.n_atoms)
elif selection == 'alpha':
atom_indices = [a.index for a in self.atoms if
a.name == 'CA'
and a.residue.is_protein]
elif selection == 'minimal':
atom_indices = [a.index for a in self.atoms if
a.name in ['CA', 'CB', 'C', 'N', 'O']
and a.residue.is_protein]
elif selection == 'heavy':
atom_indices = [a.index for a in self.atoms if
a.element != elem.hydrogen
and a.residue.is_protein]
elif selection == 'water':
atom_indices = [a.index for a in self.atoms if
a.name in ['O', 'OW']
and a.residue.is_water]
else:
raise ValueError(
'%s is not a valid option. Selection must be one of %s' % (
selection, ', '.join(options)))
indices = np.array(atom_indices)
return indices
def select_pairs(self, selection1=None, selection2=None):
"""Generate unique pairs of atom indices.
        If a selection is a string, it will be resolved using the atom
        selection DSL; otherwise it is expected to be an array of atom indices.
Parameters
----------
selection1 : str or array-like, shape=(n_indices, ), dtype=int
A selection for `select()` or an array of atom indices.
selection2 : str or array-like, shape=(n_indices, ), dtype=int
A selection for `select()` or an array of atom indices.
Returns
-------
pairs : array-like, shape=(n_pairs, 2), dtype=int
Each row gives the indices of two atoms.
"""
# Resolve selections using the atom selection DSL...
if isinstance(selection1, string_types):
a_indices = self.select(selection1)
else: # ...or use a provided array of indices.
a_indices = ensure_type(selection1, dtype=np.int32, ndim=1,
name='a_indices', warn_on_cast=False)
if isinstance(selection2, string_types):
b_indices = self.select(selection2)
else:
b_indices = ensure_type(selection2, dtype=np.int32, ndim=1,
name='b_indices', warn_on_cast=False)
a_indices.sort()
b_indices.sort()
# Create unique pairs from the indices.
# In the cases where a_indices and b_indices are identical or mutually
# exclusive, we can utilize a more efficient and memory friendly
# approach by removing the intermediate set creation required in
# the general case.
if np.array_equal(a_indices, b_indices):
pairs = self._unique_pairs_equal(a_indices)
elif len(np.intersect1d(a_indices, b_indices)) == 0:
pairs = self._unique_pairs_mutually_exclusive(a_indices, b_indices)
else:
pairs = self._unique_pairs(a_indices, b_indices)
return pairs
@classmethod
def _unique_pairs(cls, a_indices, b_indices):
return np.array(list(set(
(a, b) if a > b else (b, a)
for a, b in itertools.product(a_indices, b_indices)
if a != b)), dtype=np.int32)
@classmethod
def _unique_pairs_mutually_exclusive(cls, a_indices, b_indices):
pairs = np.fromiter(itertools.chain.from_iterable(
itertools.product(a_indices, b_indices)),
dtype=np.int32, count=len(a_indices) * len(b_indices) * 2)
return np.vstack((pairs[::2], pairs[1::2])).T
@classmethod
def _unique_pairs_equal(cls, a_indices):
pairs = np.fromiter(itertools.chain.from_iterable(
itertools.combinations(a_indices, 2)),
dtype=np.int32, count=len(a_indices) * (len(a_indices) - 1))
return np.vstack((pairs[::2], pairs[1::2])).T
def find_molecules(self):
"""Identify molecules based on bonds.
A molecule is defined as a set of atoms that are connected to each other by bonds.
This method uses the list of bonds to divide up the Topology's atoms into molecules.
Returns
-------
molecules : list of sets
Each entry represents one molecule, and is the set of all Atoms in that molecule
"""
# Make a list of every other atom to which each atom is connected.
num_atoms = self.n_atoms
atom_bonds = [[] for i in range(num_atoms)]
for atom1, atom2 in self.bonds:
atom_bonds[atom1.index].append(atom2.index)
atom_bonds[atom2.index].append(atom1.index)
# This is essentially a recursive algorithm, but it is reformulated as a loop to avoid
# stack overflows. It selects an atom, marks it as a new molecule, then recursively
# marks every atom bonded to it as also being in that molecule.
atom_molecule = [-1]*num_atoms
num_molecules = 0
for i in range(num_atoms):
if atom_molecule[i] == -1:
# Start a new molecule.
atom_stack = [i]
neighbor_stack = [0]
molecule = num_molecules
num_molecules += 1
# Recursively tag all the bonded atoms.
while len(atom_stack) > 0:
atom = atom_stack[-1]
atom_molecule[atom] = molecule
while neighbor_stack[-1] < len(atom_bonds[atom]) and atom_molecule[atom_bonds[atom][neighbor_stack[-1]]] != -1:
neighbor_stack[-1] += 1
if neighbor_stack[-1] < len(atom_bonds[atom]):
atom_stack.append(atom_bonds[atom][neighbor_stack[-1]])
neighbor_stack.append(0)
else:
del atom_stack[-1]
del neighbor_stack[-1]
# Build the final output.
molecules = [set() for i in range(num_molecules)]
for atom in self.atoms:
molecules[atom_molecule[atom.index]].add(atom)
return molecules
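# Editor's note: an illustrative usage sketch, not part of the original
# module. It builds a minimal water topology with the methods defined above
# and exercises the selection DSL and find_molecules. The residue and atom
# names are examples; 'HOH' is assumed to be one of the water residue names
# recognized by Residue.is_water.
def _demo_build_water_topology():
    top = Topology()
    chain = top.add_chain()
    res = top.add_residue('HOH', chain)
    o = top.add_atom('O', elem.get_by_symbol('O'), res)
    h1 = top.add_atom('H1', elem.get_by_symbol('H'), res)
    h2 = top.add_atom('H2', elem.get_by_symbol('H'), res)
    top.add_bond(o, h1)
    top.add_bond(o, h2)
    oxygens = top.select('name O and water')   # indices of water oxygen atoms
    molecules = top.find_molecules()           # groups the three bonded atoms together
    return top, oxygens, molecules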
class Chain(object):
"""A Chain object represents a chain within a Topology.
Attributes
----------
index : int
The index of the Chain within its Topology
topology : mdtraj.Topology
The Topology this Chain belongs to
residues : generator
Iterator over all Residues in the Chain.
atoms : generator
Iterator over all Atoms in the Chain.
"""
def __init__(self, index, topology):
"""Construct a new Chain. You should call add_chain() on the Topology instead of calling this directly."""
# The index of the Chain within its Topology
self.index = index
# The Topology this Chain belongs to
self.topology = topology
self._residues = []
@property
def residues(self):
"""Iterator over all Residues in the Chain.
Returns
-------
residueiter : listiterator
Iterator over all Residues in the Topology.
"""
return iter(self._residues)
def residue(self, index):
"""Get a specific residue in this Chain.
Parameters
----------
index : int
The index of the residue to select.
Returns
-------
residue : Residue
"""
return self._residues[index]
@property
def n_residues(self):
"""Get the number of residues in this Chain. """
return len(self._residues)
@property
def atoms(self):
"""Iterator over all Atoms in the Chain.
Returns
-------
atomiter : generator
Iterator over all Atoms in the Chain.
"""
for residue in self._residues:
for atom in residue._atoms:
yield atom
def atoms_by_name(self, name):
"""Iterator over all Atoms in the Chain with a specified name.
Parameters
----------
name : str
The particular atom name of interest.
Examples
--------
>>> for atom in chain.atoms_by_name('CA'):
... print(atom)
Returns
-------
atomiter : generator
"""
for atom in self.atoms:
if atom.name == name:
yield atom
def atom(self, index):
"""Get a specific atom in this Chain.
Parameters
----------
index : int
The index of the atom to select.
Returns
-------
atom : Atom
"""
# this could be made faster by caching the list
# of atoms internally if necessary
return next(itertools.islice(self.atoms, index, index + 1))
@property
def n_atoms(self):
"""Get the number of atoms in this Chain"""
return sum(r.n_atoms for r in self._residues)
class Residue(object):
"""A Residue object represents a residue within a Topology.
Attributes
----------
name : str
The name of the Residue
index : int
The index of the Residue within its Topology
chain : mdtraj.topology.Chain
The chain within which this residue belongs
resSeq : int
The residue sequence number
segment_id : str, optional
A label for the segment to which this residue belongs
"""
def __init__(self, name, index, chain, resSeq, segment_id=''):
"""Construct a new Residue. You should call add_residue()
on the Topology instead of calling this directly."""
self.name = name
self.index = index
self.chain = chain
self.resSeq = resSeq
self.segment_id = segment_id
self._atoms = []
@property
def atoms(self):
"""Iterator over all Atoms in the Residue.
Returns
-------
atomiter : listiterator
Iterator over all Atoms in the Residue.
"""
return iter(self._atoms)
def atoms_by_name(self, name):
"""Iterator over all Atoms in the Residue with a specified name
Parameters
----------
name : str
The particular atom name of interest.
Examples
--------
>>> for atom in residue.atoms_by_name('CA'):
... print(atom)
Returns
-------
atomiter : generator
"""
for atom in self.atoms:
if atom.name == name:
yield atom
def atom(self, index_or_name):
"""Get a specific atom in this Residue.
Parameters
----------
index_or_name : {int, str}
Either a (zero-based) index, or the name of the atom. If a string
is passed in, the first atom -- in index order -- with a matching
            name will be returned.
Returns
-------
atom : Atom
"""
try:
return self._atoms[index_or_name]
except TypeError:
try:
return next(self.atoms_by_name(index_or_name))
except StopIteration:
raise KeyError('no matching atom found')
@property
def n_atoms(self):
"""Get the number of atoms in this Residue"""
return len(self._atoms)
@property
def is_protein(self):
"""Whether the residue is one found in proteins."""
return self.name in _PROTEIN_RESIDUES
@property
def code(self):
"""Get the one letter code for this Residue"""
if self.is_protein:
return _AMINO_ACID_CODES[self.name]
else:
return None
@property
def is_water(self):
"""Whether the residue is water.
Residue names according to VMD
References
----------
http://www.ks.uiuc.edu/Research/vmd/vmd-1.3/ug/node133.html
"""
return self.name in _WATER_RESIDUES
@property
def is_nucleic(self):
"""Whether the residue is one found in nucleic acids."""
raise NotImplementedError
def __str__(self):
return '%s%s' % (self.name, self.resSeq)
def __repr__(self):
return str(self)
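# Editor's note: an illustrative sketch, not part of the original module, of
# Residue.atom's dual lookup described above: an int is a positional index,
# while a str matches the first atom with that name (KeyError if absent).
def _demo_residue_atom_lookup(residue):
    first = residue.atom(0)      # positional lookup
    calpha = residue.atom('CA')  # name lookup; assumes the residue has a CA atom
    return first, calpha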
class Atom(object):
    """An Atom object represents an atom within a Topology.
Attributes
----------
name : str
The name of the Atom
element : mdtraj.element.Element
The element of the Atoms
index : int
The index of the Atom within its Topology
residue : mdtraj.topology.Residue
The Residue this Atom belongs to
serial : int
The serial number from the PDB specification. Unlike index,
this may not be contiguous or 0-indexed.
"""
def __init__(self, name, element, index, residue, serial=None):
"""Construct a new Atom. You should call add_atom() on the Topology instead of calling this directly."""
# The name of the Atom
self.name = name
# That Atom's element
self.element = element
# The index of the Atom within its Topology
self.index = index
# The Residue this Atom belongs to
self.residue = residue
# The not-necessarily-contiguous "serial" number from the PDB spec
self.serial = serial
@property
def n_bonds(self):
"""Number of bonds in which the atom participates."""
# TODO: this info could be cached.
return ilen(bond for bond in self.residue.chain.topology.bonds
if self in bond)
@property
def is_backbone(self):
"""Whether the atom is in the backbone of a protein residue"""
return (self.name in set(['C', 'CA', 'N', 'O'])
and self.residue.is_protein)
@property
def is_sidechain(self):
"""Whether the atom is in the sidechain of a protein residue"""
return (self.name not in set(['C', 'CA', 'N', 'O'])
and self.residue.is_protein)
@property
def segment_id(self):
"""User specified segment_id of the residue to which this atom belongs"""
return self.residue.segment_id
def __eq__(self, other):
""" Check whether two Atom objects are equal. """
if self.name != other.name:
return False
if self.index != other.index:
return False
if self.element.name != other.element.name:
return False
if self.residue.name != other.residue.name:
return False
if self.residue.index != other.residue.index:
return False
if self.residue.chain.index != other.residue.chain.index:
return False
return True
def __hash__(self):
"""A quick comparison. """
return self.index
def __str__(self):
return '%s-%s' % (self.residue, self.name)
def __repr__(self):
return str(self)
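# Editor's note: an illustrative round-trip sketch appended by the editor and
# not part of the original module. It assumes pandas is installed and checks
# that to_dataframe/from_dataframe preserve the atom and bond counts.
if __name__ == '__main__':
    _top = Topology()
    _chain = _top.add_chain()
    _res = _top.add_residue('ALA', _chain)
    _ca = _top.add_atom('CA', elem.get_by_symbol('C'), _res)
    _cb = _top.add_atom('CB', elem.get_by_symbol('C'), _res)
    _top.add_bond(_ca, _cb)
    _atoms, _bonds = _top.to_dataframe()
    _rebuilt = Topology.from_dataframe(_atoms, _bonds)
    assert _rebuilt.n_atoms == _top.n_atoms
    assert _rebuilt.n_bonds == _top.n_bonds
    print(_rebuilt)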
| lgpl-2.1 |
DistrictDataLabs/yellowbrick | yellowbrick/style/palettes.py | 1 | 44210 | # yellowbrick.style.palettes
# Implements the variety of colors that yellowbrick allows access to by name.
#
# Author: Patrick O'Melveny
# Author: Benjamin Bengfort
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: palettes.py [c6aff34] [email protected] $
"""
Implements the variety of colors that yellowbrick allows access to by name.
This code was originally based on Seaborn's rcmod.py but has since been
cleaned up to be Yellowbrick-specific and to remove references to tools we don't use.
Note that these functions alter the matplotlib rc dictionary on the fly.
"""
#########################################################################
## Imports
#########################################################################
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mplcol
from itertools import cycle
from .colors import get_color_cycle
from yellowbrick.exceptions import YellowbrickValueError
##########################################################################
## Exports
##########################################################################
__all__ = ["color_palette", "set_color_codes"]
##########################################################################
## Special, Named Colors
##########################################################################
YB_KEY = "#111111" # The yellowbrick key (black) color is very dark grey
LINE_COLOR = YB_KEY # Colors for best fit lines, diagonals, etc.
##########################################################################
## Color Palettes
## Note all 6/7 color palettes can be mapped to bgrmyck color codes
## via the `set_color_codes` function, make sure they are ordered!
##########################################################################
PALETTES = {
# "name": ['blue', 'green', 'red', 'maroon', 'yellow', 'cyan']
# The yellowbrick default palette
"yellowbrick": ["#0272a2", "#9fc377", "#ca0b03", "#a50258", "#d7c703", "#88cada"],
# The following are from ColorBrewer
"accent": ["#386cb0", "#7fc97f", "#f0027f", "#beaed4", "#ffff99", "#fdc086"],
"dark": ["#7570b3", "#66a61e", "#d95f02", "#e7298a", "#e6ab02", "#1b9e77"],
"pastel": ["#cbd5e8", "#b3e2cd", "#fdcdac", "#f4cae4", "#fff2ae", "#e6f5c9"],
"bold": ["#377eb8", "#4daf4a", "#e41a1c", "#984ea3", "#ffff33", "#ff7f00"],
"muted": ["#80b1d3", "#8dd3c7", "#fb8072", "#bebada", "#ffffb3", "#fdb462"],
# The reset colors back to the original mpl color codes
"reset": [
"#0000ff",
"#008000",
"#ff0000",
"#bf00bf",
"#bfbf00",
"#00bfbf",
"#000000",
],
# Colorblind colors
"colorblind": ["#0072B2", "#009E73", "#D55E00", "#CC79A7", "#F0E442", "#56B4E9"],
"sns_colorblind": [
"#0072B2",
"#009E73",
"#D55E00",
"#CC79A7",
"#F0E442",
"#56B4E9",
],
# The following are Seaborn colors
"sns_deep": ["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"],
"sns_muted": ["#4878CF", "#6ACC65", "#D65F5F", "#B47CC7", "#C4AD66", "#77BEDB"],
"sns_pastel": ["#92C6FF", "#97F0AA", "#FF9F9A", "#D0BBFF", "#FFFEA3", "#B0E0E6"],
"sns_bright": ["#003FFF", "#03ED3A", "#E8000B", "#8A2BE2", "#FFC400", "#00D7FF"],
"sns_dark": ["#001C7F", "#017517", "#8C0900", "#7600A1", "#B8860B", "#006374"],
# Other palettes
"flatui": ["#34495e", "#2ecc71", "#e74c3c", "#9b59b6", "#f4d03f", "#3498db"],
"paired": [
"#a6cee3",
"#1f78b4",
"#b2df8a",
"#33a02c",
"#fb9a99",
"#e31a1c",
"#cab2d6",
"#6a3d9a",
"#ffff99",
"#b15928",
"#fdbf6f",
"#ff7f00",
],
"set1": [
"#377eb8",
"#4daf4a",
"#e41a1c",
"#984ea3",
"#ffff33",
"#ff7f00",
"#a65628",
"#f781bf",
"#999999",
],
# colors extracted from this blog post during pycon2017:
# http://lewisandquark.tumblr.com/
"neural_paint": [
"#167192",
"#6e7548",
"#c5a2ab",
"#00ccff",
"#de78ae",
"#ffcc99",
"#3d3f42",
"#ffffcc",
],
}
SEQUENCES = {
"ddl_heat": {
12: [
"#DBDBDB",
"#DCD5CC",
"#DCCEBE",
"#DDC8AF",
"#DEC2A0",
"#DEBB91",
"#DFB583",
"#DFAE74",
"#E0A865",
"#E1A256",
"#E19B48",
"#E29539",
]
},
"YlGn": {
3: ["#f7fcb9", "#addd8e", "#31a354"],
4: ["#ffffcc", "#c2e699", "#78c679", "#238443"],
5: ["#ffffcc", "#c2e699", "#78c679", "#31a354", "#006837"],
6: ["#ffffcc", "#d9f0a3", "#addd8e", "#78c679", "#31a354", "#006837"],
7: [
"#ffffcc",
"#d9f0a3",
"#addd8e",
"#78c679",
"#41ab5d",
"#238443",
"#005a32",
],
8: [
"#ffffe5",
"#f7fcb9",
"#d9f0a3",
"#addd8e",
"#78c679",
"#41ab5d",
"#238443",
"#005a32",
],
9: [
"#ffffe5",
"#f7fcb9",
"#d9f0a3",
"#addd8e",
"#78c679",
"#41ab5d",
"#238443",
"#006837",
"#004529",
],
},
"YlGnBu": {
3: ["#edf8b1", "#7fcdbb", "#2c7fb8"],
4: ["#ffffcc", "#a1dab4", "#41b6c4", "#225ea8"],
5: ["#ffffcc", "#a1dab4", "#41b6c4", "#2c7fb8", "#253494"],
6: ["#ffffcc", "#c7e9b4", "#7fcdbb", "#41b6c4", "#2c7fb8", "#253494"],
7: [
"#ffffcc",
"#c7e9b4",
"#7fcdbb",
"#41b6c4",
"#1d91c0",
"#225ea8",
"#0c2c84",
],
8: [
"#ffffd9",
"#edf8b1",
"#c7e9b4",
"#7fcdbb",
"#41b6c4",
"#1d91c0",
"#225ea8",
"#0c2c84",
],
9: [
"#ffffd9",
"#edf8b1",
"#c7e9b4",
"#7fcdbb",
"#41b6c4",
"#1d91c0",
"#225ea8",
"#253494",
"#081d58",
],
},
"GnBu": {
3: ["#e0f3db", "#a8ddb5", "#43a2ca"],
4: ["#f0f9e8", "#bae4bc", "#7bccc4", "#2b8cbe"],
5: ["#f0f9e8", "#bae4bc", "#7bccc4", "#43a2ca", "#0868ac"],
6: ["#f0f9e8", "#ccebc5", "#a8ddb5", "#7bccc4", "#43a2ca", "#0868ac"],
7: [
"#f0f9e8",
"#ccebc5",
"#a8ddb5",
"#7bccc4",
"#4eb3d3",
"#2b8cbe",
"#08589e",
],
8: [
"#f7fcf0",
"#e0f3db",
"#ccebc5",
"#a8ddb5",
"#7bccc4",
"#4eb3d3",
"#2b8cbe",
"#08589e",
],
9: [
"#f7fcf0",
"#e0f3db",
"#ccebc5",
"#a8ddb5",
"#7bccc4",
"#4eb3d3",
"#2b8cbe",
"#0868ac",
"#084081",
],
},
"BuGn": {
3: ["#e5f5f9", "#99d8c9", "#2ca25f"],
4: ["#edf8fb", "#b2e2e2", "#66c2a4", "#238b45"],
5: ["#edf8fb", "#b2e2e2", "#66c2a4", "#2ca25f", "#006d2c"],
6: ["#edf8fb", "#ccece6", "#99d8c9", "#66c2a4", "#2ca25f", "#006d2c"],
7: [
"#edf8fb",
"#ccece6",
"#99d8c9",
"#66c2a4",
"#41ae76",
"#238b45",
"#005824",
],
8: [
"#f7fcfd",
"#e5f5f9",
"#ccece6",
"#99d8c9",
"#66c2a4",
"#41ae76",
"#238b45",
"#005824",
],
9: [
"#f7fcfd",
"#e5f5f9",
"#ccece6",
"#99d8c9",
"#66c2a4",
"#41ae76",
"#238b45",
"#006d2c",
"#00441b",
],
},
"PuBuGn": {
3: ["#ece2f0", "#a6bddb", "#1c9099"],
4: ["#f6eff7", "#bdc9e1", "#67a9cf", "#02818a"],
5: ["#f6eff7", "#bdc9e1", "#67a9cf", "#1c9099", "#016c59"],
6: ["#f6eff7", "#d0d1e6", "#a6bddb", "#67a9cf", "#1c9099", "#016c59"],
7: [
"#f6eff7",
"#d0d1e6",
"#a6bddb",
"#67a9cf",
"#3690c0",
"#02818a",
"#016450",
],
8: [
"#fff7fb",
"#ece2f0",
"#d0d1e6",
"#a6bddb",
"#67a9cf",
"#3690c0",
"#02818a",
"#016450",
],
9: [
"#fff7fb",
"#ece2f0",
"#d0d1e6",
"#a6bddb",
"#67a9cf",
"#3690c0",
"#02818a",
"#016c59",
"#014636",
],
},
"PuBu": {
3: ["#ece7f2", "#a6bddb", "#2b8cbe"],
4: ["#f1eef6", "#bdc9e1", "#74a9cf", "#0570b0"],
5: ["#f1eef6", "#bdc9e1", "#74a9cf", "#2b8cbe", "#045a8d"],
6: ["#f1eef6", "#d0d1e6", "#a6bddb", "#74a9cf", "#2b8cbe", "#045a8d"],
7: [
"#f1eef6",
"#d0d1e6",
"#a6bddb",
"#74a9cf",
"#3690c0",
"#0570b0",
"#034e7b",
],
8: [
"#fff7fb",
"#ece7f2",
"#d0d1e6",
"#a6bddb",
"#74a9cf",
"#3690c0",
"#0570b0",
"#034e7b",
],
9: [
"#fff7fb",
"#ece7f2",
"#d0d1e6",
"#a6bddb",
"#74a9cf",
"#3690c0",
"#0570b0",
"#045a8d",
"#023858",
],
},
"BuPu": {
3: ["#e0ecf4", "#9ebcda", "#8856a7"],
4: ["#edf8fb", "#b3cde3", "#8c96c6", "#88419d"],
5: ["#edf8fb", "#b3cde3", "#8c96c6", "#8856a7", "#810f7c"],
6: ["#edf8fb", "#bfd3e6", "#9ebcda", "#8c96c6", "#8856a7", "#810f7c"],
7: [
"#edf8fb",
"#bfd3e6",
"#9ebcda",
"#8c96c6",
"#8c6bb1",
"#88419d",
"#6e016b",
],
8: [
"#f7fcfd",
"#e0ecf4",
"#bfd3e6",
"#9ebcda",
"#8c96c6",
"#8c6bb1",
"#88419d",
"#6e016b",
],
9: [
"#f7fcfd",
"#e0ecf4",
"#bfd3e6",
"#9ebcda",
"#8c96c6",
"#8c6bb1",
"#88419d",
"#810f7c",
"#4d004b",
],
},
"RdPu": {
3: ["#fde0dd", "#fa9fb5", "#c51b8a"],
4: ["#feebe2", "#fbb4b9", "#f768a1", "#ae017e"],
5: ["#feebe2", "#fbb4b9", "#f768a1", "#c51b8a", "#7a0177"],
6: ["#feebe2", "#fcc5c0", "#fa9fb5", "#f768a1", "#c51b8a", "#7a0177"],
7: [
"#feebe2",
"#fcc5c0",
"#fa9fb5",
"#f768a1",
"#dd3497",
"#ae017e",
"#7a0177",
],
8: [
"#fff7f3",
"#fde0dd",
"#fcc5c0",
"#fa9fb5",
"#f768a1",
"#dd3497",
"#ae017e",
"#7a0177",
],
9: [
"#fff7f3",
"#fde0dd",
"#fcc5c0",
"#fa9fb5",
"#f768a1",
"#dd3497",
"#ae017e",
"#7a0177",
"#49006a",
],
},
"PuRd": {
3: ["#e7e1ef", "#c994c7", "#dd1c77"],
4: ["#f1eef6", "#d7b5d8", "#df65b0", "#ce1256"],
5: ["#f1eef6", "#d7b5d8", "#df65b0", "#dd1c77", "#980043"],
6: ["#f1eef6", "#d4b9da", "#c994c7", "#df65b0", "#dd1c77", "#980043"],
7: [
"#f1eef6",
"#d4b9da",
"#c994c7",
"#df65b0",
"#e7298a",
"#ce1256",
"#91003f",
],
8: [
"#f7f4f9",
"#e7e1ef",
"#d4b9da",
"#c994c7",
"#df65b0",
"#e7298a",
"#ce1256",
"#91003f",
],
9: [
"#f7f4f9",
"#e7e1ef",
"#d4b9da",
"#c994c7",
"#df65b0",
"#e7298a",
"#ce1256",
"#980043",
"#67001f",
],
},
"OrRd": {
3: ["#fee8c8", "#fdbb84", "#e34a33"],
4: ["#fef0d9", "#fdcc8a", "#fc8d59", "#d7301f"],
5: ["#fef0d9", "#fdcc8a", "#fc8d59", "#e34a33", "#b30000"],
6: ["#fef0d9", "#fdd49e", "#fdbb84", "#fc8d59", "#e34a33", "#b30000"],
7: [
"#fef0d9",
"#fdd49e",
"#fdbb84",
"#fc8d59",
"#ef6548",
"#d7301f",
"#990000",
],
8: [
"#fff7ec",
"#fee8c8",
"#fdd49e",
"#fdbb84",
"#fc8d59",
"#ef6548",
"#d7301f",
"#990000",
],
9: [
"#fff7ec",
"#fee8c8",
"#fdd49e",
"#fdbb84",
"#fc8d59",
"#ef6548",
"#d7301f",
"#b30000",
"#7f0000",
],
},
"YlOrRd": {
3: ["#ffeda0", "#feb24c", "#f03b20"],
4: ["#ffffb2", "#fecc5c", "#fd8d3c", "#e31a1c"],
5: ["#ffffb2", "#fecc5c", "#fd8d3c", "#f03b20", "#bd0026"],
6: ["#ffffb2", "#fed976", "#feb24c", "#fd8d3c", "#f03b20", "#bd0026"],
7: [
"#ffffb2",
"#fed976",
"#feb24c",
"#fd8d3c",
"#fc4e2a",
"#e31a1c",
"#b10026",
],
8: [
"#ffffcc",
"#ffeda0",
"#fed976",
"#feb24c",
"#fd8d3c",
"#fc4e2a",
"#e31a1c",
"#b10026",
],
9: [
"#ffffcc",
"#ffeda0",
"#fed976",
"#feb24c",
"#fd8d3c",
"#fc4e2a",
"#e31a1c",
"#bd0026",
"#800026",
],
},
"YlOrBr": {
3: ["#fff7bc", "#fec44f", "#d95f0e"],
4: ["#ffffd4", "#fed98e", "#fe9929", "#cc4c02"],
5: ["#ffffd4", "#fed98e", "#fe9929", "#d95f0e", "#993404"],
6: ["#ffffd4", "#fee391", "#fec44f", "#fe9929", "#d95f0e", "#993404"],
7: [
"#ffffd4",
"#fee391",
"#fec44f",
"#fe9929",
"#ec7014",
"#cc4c02",
"#8c2d04",
],
8: [
"#ffffe5",
"#fff7bc",
"#fee391",
"#fec44f",
"#fe9929",
"#ec7014",
"#cc4c02",
"#8c2d04",
],
9: [
"#ffffe5",
"#fff7bc",
"#fee391",
"#fec44f",
"#fe9929",
"#ec7014",
"#cc4c02",
"#993404",
"#662506",
],
},
"Purples": {
3: ["#efedf5", "#bcbddc", "#756bb1"],
4: ["#f2f0f7", "#cbc9e2", "#9e9ac8", "#6a51a3"],
5: ["#f2f0f7", "#cbc9e2", "#9e9ac8", "#756bb1", "#54278f"],
6: ["#f2f0f7", "#dadaeb", "#bcbddc", "#9e9ac8", "#756bb1", "#54278f"],
7: [
"#f2f0f7",
"#dadaeb",
"#bcbddc",
"#9e9ac8",
"#807dba",
"#6a51a3",
"#4a1486",
],
8: [
"#fcfbfd",
"#efedf5",
"#dadaeb",
"#bcbddc",
"#9e9ac8",
"#807dba",
"#6a51a3",
"#4a1486",
],
9: [
"#fcfbfd",
"#efedf5",
"#dadaeb",
"#bcbddc",
"#9e9ac8",
"#807dba",
"#6a51a3",
"#54278f",
"#3f007d",
],
},
"Blues": {
3: ["#deebf7", "#9ecae1", "#3182bd"],
4: ["#eff3ff", "#bdd7e7", "#6baed6", "#2171b5"],
5: ["#eff3ff", "#bdd7e7", "#6baed6", "#3182bd", "#08519c"],
6: ["#eff3ff", "#c6dbef", "#9ecae1", "#6baed6", "#3182bd", "#08519c"],
7: [
"#eff3ff",
"#c6dbef",
"#9ecae1",
"#6baed6",
"#4292c6",
"#2171b5",
"#084594",
],
8: [
"#f7fbff",
"#deebf7",
"#c6dbef",
"#9ecae1",
"#6baed6",
"#4292c6",
"#2171b5",
"#084594",
],
9: [
"#f7fbff",
"#deebf7",
"#c6dbef",
"#9ecae1",
"#6baed6",
"#4292c6",
"#2171b5",
"#08519c",
"#08306b",
],
},
"Greens": {
3: ["#e5f5e0", "#a1d99b", "#31a354"],
4: ["#edf8e9", "#bae4b3", "#74c476", "#238b45"],
5: ["#edf8e9", "#bae4b3", "#74c476", "#31a354", "#006d2c"],
6: ["#edf8e9", "#c7e9c0", "#a1d99b", "#74c476", "#31a354", "#006d2c"],
7: [
"#edf8e9",
"#c7e9c0",
"#a1d99b",
"#74c476",
"#41ab5d",
"#238b45",
"#005a32",
],
8: [
"#f7fcf5",
"#e5f5e0",
"#c7e9c0",
"#a1d99b",
"#74c476",
"#41ab5d",
"#238b45",
"#005a32",
],
9: [
"#f7fcf5",
"#e5f5e0",
"#c7e9c0",
"#a1d99b",
"#74c476",
"#41ab5d",
"#238b45",
"#006d2c",
"#00441b",
],
},
"Oranges": {
3: ["#fee6ce", "#fdae6b", "#e6550d"],
4: ["#feedde", "#fdbe85", "#fd8d3c", "#d94701"],
5: ["#feedde", "#fdbe85", "#fd8d3c", "#e6550d", "#a63603"],
6: ["#feedde", "#fdd0a2", "#fdae6b", "#fd8d3c", "#e6550d", "#a63603"],
7: [
"#feedde",
"#fdd0a2",
"#fdae6b",
"#fd8d3c",
"#f16913",
"#d94801",
"#8c2d04",
],
8: [
"#fff5eb",
"#fee6ce",
"#fdd0a2",
"#fdae6b",
"#fd8d3c",
"#f16913",
"#d94801",
"#8c2d04",
],
9: [
"#fff5eb",
"#fee6ce",
"#fdd0a2",
"#fdae6b",
"#fd8d3c",
"#f16913",
"#d94801",
"#a63603",
"#7f2704",
],
},
"Reds": {
3: ["#fee0d2", "#fc9272", "#de2d26"],
4: ["#fee5d9", "#fcae91", "#fb6a4a", "#cb181d"],
5: ["#fee5d9", "#fcae91", "#fb6a4a", "#de2d26", "#a50f15"],
6: ["#fee5d9", "#fcbba1", "#fc9272", "#fb6a4a", "#de2d26", "#a50f15"],
7: [
"#fee5d9",
"#fcbba1",
"#fc9272",
"#fb6a4a",
"#ef3b2c",
"#cb181d",
"#99000d",
],
8: [
"#fff5f0",
"#fee0d2",
"#fcbba1",
"#fc9272",
"#fb6a4a",
"#ef3b2c",
"#cb181d",
"#99000d",
],
9: [
"#fff5f0",
"#fee0d2",
"#fcbba1",
"#fc9272",
"#fb6a4a",
"#ef3b2c",
"#cb181d",
"#a50f15",
"#67000d",
],
},
"Greys": {
3: ["#f0f0f0", "#bdbdbd", "#636363"],
4: ["#f7f7f7", "#cccccc", "#969696", "#525252"],
5: ["#f7f7f7", "#cccccc", "#969696", "#636363", "#252525"],
6: ["#f7f7f7", "#d9d9d9", "#bdbdbd", "#969696", "#636363", "#252525"],
7: [
"#f7f7f7",
"#d9d9d9",
"#bdbdbd",
"#969696",
"#737373",
"#525252",
"#252525",
],
8: [
"#ffffff",
"#f0f0f0",
"#d9d9d9",
"#bdbdbd",
"#969696",
"#737373",
"#525252",
"#252525",
],
9: [
"#ffffff",
"#f0f0f0",
"#d9d9d9",
"#bdbdbd",
"#969696",
"#737373",
"#525252",
"#252525",
"#000000",
],
},
"PuOr": {
3: ["#f1a340", "#f7f7f7", "#998ec3"],
4: ["#e66101", "#fdb863", "#b2abd2", "#5e3c99"],
5: ["#e66101", "#fdb863", "#f7f7f7", "#b2abd2", "#5e3c99"],
6: ["#b35806", "#f1a340", "#fee0b6", "#d8daeb", "#998ec3", "#542788"],
7: [
"#b35806",
"#f1a340",
"#fee0b6",
"#f7f7f7",
"#d8daeb",
"#998ec3",
"#542788",
],
8: [
"#b35806",
"#e08214",
"#fdb863",
"#fee0b6",
"#d8daeb",
"#b2abd2",
"#8073ac",
"#542788",
],
9: [
"#b35806",
"#e08214",
"#fdb863",
"#fee0b6",
"#f7f7f7",
"#d8daeb",
"#b2abd2",
"#8073ac",
"#542788",
],
10: [
"#7f3b08",
"#b35806",
"#e08214",
"#fdb863",
"#fee0b6",
"#d8daeb",
"#b2abd2",
"#8073ac",
"#542788",
"#2d004b",
],
11: [
"#7f3b08",
"#b35806",
"#e08214",
"#fdb863",
"#fee0b6",
"#f7f7f7",
"#d8daeb",
"#b2abd2",
"#8073ac",
"#542788",
"#2d004b",
],
},
"BrBG": {
3: ["#d8b365", "#f5f5f5", "#5ab4ac"],
4: ["#a6611a", "#dfc27d", "#80cdc1", "#018571"],
5: ["#a6611a", "#dfc27d", "#f5f5f5", "#80cdc1", "#018571"],
6: ["#8c510a", "#d8b365", "#f6e8c3", "#c7eae5", "#5ab4ac", "#01665e"],
7: [
"#8c510a",
"#d8b365",
"#f6e8c3",
"#f5f5f5",
"#c7eae5",
"#5ab4ac",
"#01665e",
],
8: [
"#8c510a",
"#bf812d",
"#dfc27d",
"#f6e8c3",
"#c7eae5",
"#80cdc1",
"#35978f",
"#01665e",
],
9: [
"#8c510a",
"#bf812d",
"#dfc27d",
"#f6e8c3",
"#f5f5f5",
"#c7eae5",
"#80cdc1",
"#35978f",
"#01665e",
],
10: [
"#543005",
"#8c510a",
"#bf812d",
"#dfc27d",
"#f6e8c3",
"#c7eae5",
"#80cdc1",
"#35978f",
"#01665e",
"#003c30",
],
11: [
"#543005",
"#8c510a",
"#bf812d",
"#dfc27d",
"#f6e8c3",
"#f5f5f5",
"#c7eae5",
"#80cdc1",
"#35978f",
"#01665e",
"#003c30",
],
},
"PRGn": {
3: ["#af8dc3", "#f7f7f7", "#7fbf7b"],
4: ["#7b3294", "#c2a5cf", "#a6dba0", "#008837"],
5: ["#7b3294", "#c2a5cf", "#f7f7f7", "#a6dba0", "#008837"],
6: ["#762a83", "#af8dc3", "#e7d4e8", "#d9f0d3", "#7fbf7b", "#1b7837"],
7: [
"#762a83",
"#af8dc3",
"#e7d4e8",
"#f7f7f7",
"#d9f0d3",
"#7fbf7b",
"#1b7837",
],
8: [
"#762a83",
"#9970ab",
"#c2a5cf",
"#e7d4e8",
"#d9f0d3",
"#a6dba0",
"#5aae61",
"#1b7837",
],
9: [
"#762a83",
"#9970ab",
"#c2a5cf",
"#e7d4e8",
"#f7f7f7",
"#d9f0d3",
"#a6dba0",
"#5aae61",
"#1b7837",
],
10: [
"#40004b",
"#762a83",
"#9970ab",
"#c2a5cf",
"#e7d4e8",
"#d9f0d3",
"#a6dba0",
"#5aae61",
"#1b7837",
"#00441b",
],
11: [
"#40004b",
"#762a83",
"#9970ab",
"#c2a5cf",
"#e7d4e8",
"#f7f7f7",
"#d9f0d3",
"#a6dba0",
"#5aae61",
"#1b7837",
"#00441b",
],
},
"PiYG": {
3: ["#e9a3c9", "#f7f7f7", "#a1d76a"],
4: ["#d01c8b", "#f1b6da", "#b8e186", "#4dac26"],
5: ["#d01c8b", "#f1b6da", "#f7f7f7", "#b8e186", "#4dac26"],
6: ["#c51b7d", "#e9a3c9", "#fde0ef", "#e6f5d0", "#a1d76a", "#4d9221"],
7: [
"#c51b7d",
"#e9a3c9",
"#fde0ef",
"#f7f7f7",
"#e6f5d0",
"#a1d76a",
"#4d9221",
],
8: [
"#c51b7d",
"#de77ae",
"#f1b6da",
"#fde0ef",
"#e6f5d0",
"#b8e186",
"#7fbc41",
"#4d9221",
],
9: [
"#c51b7d",
"#de77ae",
"#f1b6da",
"#fde0ef",
"#f7f7f7",
"#e6f5d0",
"#b8e186",
"#7fbc41",
"#4d9221",
],
10: [
"#8e0152",
"#c51b7d",
"#de77ae",
"#f1b6da",
"#fde0ef",
"#e6f5d0",
"#b8e186",
"#7fbc41",
"#4d9221",
"#276419",
],
11: [
"#8e0152",
"#c51b7d",
"#de77ae",
"#f1b6da",
"#fde0ef",
"#f7f7f7",
"#e6f5d0",
"#b8e186",
"#7fbc41",
"#4d9221",
"#276419",
],
},
"RdBu": {
3: ["#ef8a62", "#f7f7f7", "#67a9cf"],
4: ["#ca0020", "#f4a582", "#92c5de", "#0571b0"],
5: ["#ca0020", "#f4a582", "#f7f7f7", "#92c5de", "#0571b0"],
6: ["#b2182b", "#ef8a62", "#fddbc7", "#d1e5f0", "#67a9cf", "#2166ac"],
7: [
"#b2182b",
"#ef8a62",
"#fddbc7",
"#f7f7f7",
"#d1e5f0",
"#67a9cf",
"#2166ac",
],
8: [
"#b2182b",
"#d6604d",
"#f4a582",
"#fddbc7",
"#d1e5f0",
"#92c5de",
"#4393c3",
"#2166ac",
],
9: [
"#b2182b",
"#d6604d",
"#f4a582",
"#fddbc7",
"#f7f7f7",
"#d1e5f0",
"#92c5de",
"#4393c3",
"#2166ac",
],
10: [
"#67001f",
"#b2182b",
"#d6604d",
"#f4a582",
"#fddbc7",
"#d1e5f0",
"#92c5de",
"#4393c3",
"#2166ac",
"#053061",
],
11: [
"#67001f",
"#b2182b",
"#d6604d",
"#f4a582",
"#fddbc7",
"#f7f7f7",
"#d1e5f0",
"#92c5de",
"#4393c3",
"#2166ac",
"#053061",
],
},
"RdGy": {
3: ["#ef8a62", "#ffffff", "#999999"],
4: ["#ca0020", "#f4a582", "#bababa", "#404040"],
5: ["#ca0020", "#f4a582", "#ffffff", "#bababa", "#404040"],
6: ["#b2182b", "#ef8a62", "#fddbc7", "#e0e0e0", "#999999", "#4d4d4d"],
7: [
"#b2182b",
"#ef8a62",
"#fddbc7",
"#ffffff",
"#e0e0e0",
"#999999",
"#4d4d4d",
],
8: [
"#b2182b",
"#d6604d",
"#f4a582",
"#fddbc7",
"#e0e0e0",
"#bababa",
"#878787",
"#4d4d4d",
],
9: [
"#b2182b",
"#d6604d",
"#f4a582",
"#fddbc7",
"#ffffff",
"#e0e0e0",
"#bababa",
"#878787",
"#4d4d4d",
],
10: [
"#67001f",
"#b2182b",
"#d6604d",
"#f4a582",
"#fddbc7",
"#e0e0e0",
"#bababa",
"#878787",
"#4d4d4d",
"#1a1a1a",
],
11: [
"#67001f",
"#b2182b",
"#d6604d",
"#f4a582",
"#fddbc7",
"#ffffff",
"#e0e0e0",
"#bababa",
"#878787",
"#4d4d4d",
"#1a1a1a",
],
},
"RdYlBu": {
3: ["#fc8d59", "#ffffbf", "#91bfdb"],
4: ["#d7191c", "#fdae61", "#abd9e9", "#2c7bb6"],
5: ["#d7191c", "#fdae61", "#ffffbf", "#abd9e9", "#2c7bb6"],
6: ["#d73027", "#fc8d59", "#fee090", "#e0f3f8", "#91bfdb", "#4575b4"],
7: [
"#d73027",
"#fc8d59",
"#fee090",
"#ffffbf",
"#e0f3f8",
"#91bfdb",
"#4575b4",
],
8: [
"#d73027",
"#f46d43",
"#fdae61",
"#fee090",
"#e0f3f8",
"#abd9e9",
"#74add1",
"#4575b4",
],
9: [
"#d73027",
"#f46d43",
"#fdae61",
"#fee090",
"#ffffbf",
"#e0f3f8",
"#abd9e9",
"#74add1",
"#4575b4",
],
10: [
"#a50026",
"#d73027",
"#f46d43",
"#fdae61",
"#fee090",
"#e0f3f8",
"#abd9e9",
"#74add1",
"#4575b4",
"#313695",
],
11: [
"#a50026",
"#d73027",
"#f46d43",
"#fdae61",
"#fee090",
"#ffffbf",
"#e0f3f8",
"#abd9e9",
"#74add1",
"#4575b4",
"#313695",
],
},
"Spectral": {
3: ["#fc8d59", "#ffffbf", "#99d594"],
4: ["#d7191c", "#fdae61", "#abdda4", "#2b83ba"],
5: ["#d7191c", "#fdae61", "#ffffbf", "#abdda4", "#2b83ba"],
6: ["#d53e4f", "#fc8d59", "#fee08b", "#e6f598", "#99d594", "#3288bd"],
7: [
"#d53e4f",
"#fc8d59",
"#fee08b",
"#ffffbf",
"#e6f598",
"#99d594",
"#3288bd",
],
8: [
"#d53e4f",
"#f46d43",
"#fdae61",
"#fee08b",
"#e6f598",
"#abdda4",
"#66c2a5",
"#3288bd",
],
9: [
"#d53e4f",
"#f46d43",
"#fdae61",
"#fee08b",
"#ffffbf",
"#e6f598",
"#abdda4",
"#66c2a5",
"#3288bd",
],
10: [
"#9e0142",
"#d53e4f",
"#f46d43",
"#fdae61",
"#fee08b",
"#e6f598",
"#abdda4",
"#66c2a5",
"#3288bd",
"#5e4fa2",
],
11: [
"#9e0142",
"#d53e4f",
"#f46d43",
"#fdae61",
"#fee08b",
"#ffffbf",
"#e6f598",
"#abdda4",
"#66c2a5",
"#3288bd",
"#5e4fa2",
],
},
"RdYlGn": {
3: ["#fc8d59", "#ffffbf", "#91cf60"],
4: ["#d7191c", "#fdae61", "#a6d96a", "#1a9641"],
5: ["#d7191c", "#fdae61", "#ffffbf", "#a6d96a", "#1a9641"],
6: ["#d73027", "#fc8d59", "#fee08b", "#d9ef8b", "#91cf60", "#1a9850"],
7: [
"#d73027",
"#fc8d59",
"#fee08b",
"#ffffbf",
"#d9ef8b",
"#91cf60",
"#1a9850",
],
8: [
"#d73027",
"#f46d43",
"#fdae61",
"#fee08b",
"#d9ef8b",
"#a6d96a",
"#66bd63",
"#1a9850",
],
9: [
"#d73027",
"#f46d43",
"#fdae61",
"#fee08b",
"#ffffbf",
"#d9ef8b",
"#a6d96a",
"#66bd63",
"#1a9850",
],
10: [
"#a50026",
"#d73027",
"#f46d43",
"#fdae61",
"#fee08b",
"#d9ef8b",
"#a6d96a",
"#66bd63",
"#1a9850",
"#006837",
],
11: [
"#a50026",
"#d73027",
"#f46d43",
"#fdae61",
"#fee08b",
"#ffffbf",
"#d9ef8b",
"#a6d96a",
"#66bd63",
"#1a9850",
"#006837",
],
},
}
## Special, backward compatible color map.
ddlheatmap = mplcol.ListedColormap(SEQUENCES["ddl_heat"][12], "DDL Heat", 12)
## Default Color Sequence
DEFAULT_SEQUENCE = "RdBu"
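# Editor's note: an illustrative sketch, not part of the original module,
# showing how the SEQUENCES table above is keyed -- first by sequence name,
# then by the desired number of colors -- and how a slice of it can back a
# ListedColormap in the same way as ``ddlheatmap``.
def _demo_sequence_lookup(name=DEFAULT_SEQUENCE, n_colors=5):
    colors = SEQUENCES[name][n_colors]   # a list of n_colors hex strings
    return mplcol.ListedColormap(colors, name, n_colors)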
##########################################################################
## Palette Object
##########################################################################
class ColorPalette(list):
"""
A wrapper for functionality surrounding a list of colors, including a
context manager that allows the palette to be set with a with statement.
"""
def __init__(self, name_or_list):
"""
Can initialize the ColorPalette with either a name or a list.
Parameters
----------
name_or_list :
specify a palette name or a list of RGB or Hex values
"""
if isinstance(name_or_list, str):
if name_or_list not in PALETTES:
raise YellowbrickValueError(
"'{}' is not a recognized palette!".format(name_or_list)
)
name_or_list = PALETTES[name_or_list]
super(ColorPalette, self).__init__(name_or_list)
def __enter__(self):
"""
        Open the context and assign the palette to the mpl.rcParams
"""
from .rcmod import set_palette
self._orig_palette = color_palette()
set_palette(self)
return self
def __exit__(self, *args):
"""
Close the context and restore the original palette
"""
from .rcmod import set_palette
set_palette(self._orig_palette)
def as_hex(self):
"""
Return a color palette with hex codes instead of RGB values.
"""
hex = [mpl.colors.rgb2hex(rgb) for rgb in self]
return ColorPalette(hex)
def as_rgb(self):
"""
Return a color palette with RGB values instead of hex codes.
"""
rgb = [mpl.colors.colorConverter.to_rgb(hex) for hex in self]
return ColorPalette(rgb)
def plot(self, size=1):
"""
Plot the values in the color palette as a horizontal array.
See Seaborn's palplot function for inspiration.
Parameters
----------
size : int
scaling factor for size of the plot
"""
n = len(self)
fig, ax = plt.subplots(1, 1, figsize=(n * size, size))
ax.imshow(
np.arange(n).reshape(1, n),
cmap=mpl.colors.ListedColormap(list(self)),
interpolation="nearest",
aspect="auto",
)
ax.set_xticks(np.arange(n) - 0.5)
ax.set_yticks([-0.5, 0.5])
ax.set_xticklabels([])
ax.set_yticklabels([])
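# Editor's note: an illustrative usage sketch, not part of the original
# module. ColorPalette acts like a list, converts between hex and RGB, and
# temporarily installs itself as the matplotlib color cycle when used as a
# context manager (the previous palette is restored on exit).
def _demo_color_palette_object():
    palette = ColorPalette('flatui')     # look up a named palette
    as_rgb = palette.as_rgb()            # list of (r, g, b) tuples
    back_to_hex = as_rgb.as_hex()        # and back to hex strings
    with ColorPalette('pastel'):
        pass                             # plots created here use 'pastel'
    return back_to_hex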
##########################################################################
## Palette Functions
##########################################################################
def color_palette(palette=None, n_colors=None):
"""
Return a color palette object with color definition and handling.
Calling this function with ``palette=None`` will return the current
matplotlib color cycle.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
palette : None or str or sequence
Name of a palette or ``None`` to return the current palette. If a
        sequence, the input colors are used but possibly cycled.
Available palette names from :py:mod:`yellowbrick.colors.palettes` are:
.. hlist::
:columns: 3
* :py:const:`accent`
* :py:const:`dark`
* :py:const:`paired`
* :py:const:`pastel`
* :py:const:`bold`
* :py:const:`muted`
* :py:const:`colorblind`
* :py:const:`sns_colorblind`
* :py:const:`sns_deep`
* :py:const:`sns_muted`
* :py:const:`sns_pastel`
* :py:const:`sns_bright`
* :py:const:`sns_dark`
* :py:const:`flatui`
* :py:const:`neural_paint`
n_colors : None or int
Number of colors in the palette. If ``None``, the default will depend
        on how ``palette`` is specified. Named palettes default to 6 colors
        (matching the shorthand codes "bgrmyck"), though some palettes have
        more or fewer colors; the returned list can therefore only be
        shortened by specifying this parameter. Asking for more colors than
        exist in the palette will cause it to cycle.
Returns
-------
list(tuple)
Returns a ColorPalette object, which behaves like a list, but can be
used as a context manager and possesses functions to convert colors.
.. seealso::
:func:`.set_palette`
Set the default color cycle for all plots.
:func:`.set_color_codes`
Reassign color codes like ``"b"``, ``"g"``, etc. to
colors from one of the yellowbrick palettes.
:func:`..colors.resolve_colors`
Resolve a color map or listed sequence of colors.
"""
if palette is None:
palette = get_color_cycle()
if n_colors is None:
n_colors = len(palette)
elif not isinstance(palette, str):
if n_colors is None:
n_colors = len(palette)
else:
if palette.lower() not in PALETTES:
raise YellowbrickValueError(
"'{}' is not a recognized palette!".format(palette)
)
palette = PALETTES[palette.lower()]
if n_colors is None:
n_colors = len(palette)
# Always return as many colors as we asked for
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
# Always return in RGB tuple format
try:
palette = map(mpl.colors.colorConverter.to_rgb, palette)
palette = ColorPalette(palette)
except ValueError:
raise YellowbrickValueError(
"Could not generate a palette for %s" % str(palette)
)
return palette
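# Illustrative sketch (not part of the original module): color_palette() with a
# named palette cycles its colors when more are requested than it defines, and
# with no arguments it returns the current matplotlib color cycle. "accent" is
# assumed to be a registered palette name from the docstring above.
def _example_color_palette_cycling():
    pal = color_palette("accent", n_colors=12)   # cycles the named palette
    assert len(pal) == 12
    current = color_palette()                    # current color cycle as a ColorPalette
    return pal, current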
def set_color_codes(palette="accent"):
"""
Change how matplotlib color shorthands are interpreted.
Calling this will change how shorthand codes like "b" or "g"
are interpreted by matplotlib in subsequent plots.
Parameters
----------
palette : str
Named yellowbrick palette to use as the source of colors.
See Also
--------
set_palette : Color codes can also be set through the function that
sets the matplotlib color cycle.
"""
if palette not in PALETTES:
raise YellowbrickValueError("'{}' is not a recognized palette!".format(palette))
# Fetch the colors and adapt the length
colors = PALETTES[palette]
if len(colors) > 7:
# Truncate colors that are longer than 7
colors = colors[:7]
elif len(colors) < 7:
# Add the key (black) color to colors that are shorter than 7
colors = colors + [YB_KEY]
# Set the color codes on matplotlib
for code, color in zip("bgrmyck", colors):
rgb = mpl.colors.colorConverter.to_rgb(color)
mpl.colors.colorConverter.colors[code] = rgb
mpl.colors.colorConverter.cache[code] = rgb
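# Illustrative sketch (not part of the original module): once set_color_codes()
# has run, matplotlib shorthand codes such as "b" resolve to the chosen
# palette's colors rather than the stock blue.
def _example_set_color_codes():
    set_color_codes("accent")
    return mpl.colors.colorConverter.to_rgb("b")  # first color of the "accent" palette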
##########################################################################
## Sequence Functions
##########################################################################
def color_sequence(palette=None, n_colors=None):
"""
Return a `ListedColormap` object from a named sequence palette. Useful
for continuous color scheme values and color maps.
Calling this function with ``palette=None`` will return the default
color sequence: Color Brewer RdBu.
Parameters
----------
palette : None or str or sequence
Name of a palette or ``None`` to return the default palette. If a
        sequence, the input colors are used to create a ListedColormap.
The currently implemented color sequences are from Color Brewer.
Available palette names from :py:mod:`yellowbrick.colors.palettes` are:
.. hlist::
:columns: 3
* :py:const: Blues
* :py:const: BrBG
* :py:const: BuGn
* :py:const: BuPu
* :py:const: GnBu
* :py:const: Greens
* :py:const: Greys
* :py:const: OrRd
* :py:const: Oranges
* :py:const: PRGn
* :py:const: PiYG
* :py:const: PuBu
* :py:const: PuBuGn
* :py:const: PuOr
* :py:const: PuRd
* :py:const: Purples
* :py:const: RdBu
* :py:const: RdGy
* :py:const: RdPu
* :py:const: RdYlBu
* :py:const: RdYlGn
* :py:const: Reds
* :py:const: Spectral
* :py:const: YlGn
* :py:const: YlGnBu
* :py:const: YlOrBr
* :py:const: YlOrRd
* :py:const: ddl_heat
n_colors : None or int
Number of colors in the palette. If ``None``, the default will depend
on how ``palette`` is specified - selecting the largest sequence for
        that palette name. Note that sequences have a minimum length of 3 - if
a number of colors is specified that is not available for the sequence
a ``ValueError`` is raised.
Returns
-------
colormap
Returns a ListedColormap object, an artist object from the matplotlib
library that can be used wherever a colormap is necessary.
"""
# Select the default colormap if None is passed in.
palette = palette or DEFAULT_SEQUENCE
# Create a listed color map from the sequence
if not isinstance(palette, str):
return mplcol.ListedColormap(palette)
# Otherwise perform a case-insensitive lookup
sequences = {key.lower(): key for key in SEQUENCES.keys()}
if palette.lower() not in sequences:
raise YellowbrickValueError("'{}' is not a recognized palette!".format(palette))
# Collect the palette into the dictionary of lists.
n_palettes = SEQUENCES[sequences[palette.lower()]]
# If no definitive color is passed in, maximize it.
if n_colors is None:
n_colors = max(n_palettes.keys())
else:
if n_colors not in n_palettes.keys():
raise YellowbrickValueError(
"No {} palette of length {}".format(palette, n_colors)
)
# Return the color map from the sequence
return mplcol.ListedColormap(n_palettes[n_colors], name=palette, N=n_colors)
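# Illustrative sketch (not part of the original module): color_sequence() yields
# a ListedColormap suited to continuous values; with no arguments the default
# "RdBu" Color Brewer sequence is used at its maximum available length.
def _example_color_sequence():
    cmap = color_sequence()                       # default RdBu sequence
    data = np.random.RandomState(0).rand(10, 10)  # toy data for display
    fig, ax = plt.subplots()
    mesh = ax.pcolormesh(data, cmap=cmap)
    fig.colorbar(mesh, ax=ax)
    return fig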
| apache-2.0 |
CallaJun/hackprince | indico/matplotlib/tests/test_ticker.py | 9 | 4261 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import nose.tools
from nose.tools import assert_raises
from numpy.testing import assert_almost_equal
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from matplotlib.testing.decorators import cleanup
def test_MaxNLocator():
loc = mticker.MaxNLocator(nbins=5)
test_value = np.array([20., 40., 60., 80., 100.])
assert_almost_equal(loc.tick_values(20, 100), test_value)
test_value = np.array([0., 0.0002, 0.0004, 0.0006, 0.0008, 0.001])
assert_almost_equal(loc.tick_values(0.001, 0.0001), test_value)
test_value = np.array([-1.0e+15, -5.0e+14, 0e+00, 5e+14, 1.0e+15])
assert_almost_equal(loc.tick_values(-1e15, 1e15), test_value)
def test_LinearLocator():
loc = mticker.LinearLocator(numticks=3)
test_value = np.array([-0.8, -0.3, 0.2])
assert_almost_equal(loc.tick_values(-0.8, 0.2), test_value)
def test_MultipleLocator():
loc = mticker.MultipleLocator(base=3.147)
test_value = np.array([-9.441, -6.294, -3.147, 0., 3.147, 6.294,
9.441, 12.588])
assert_almost_equal(loc.tick_values(-7, 10), test_value)
@cleanup
def test_AutoMinorLocator():
fig, ax = plt.subplots()
ax.set_xlim(0, 1.39)
ax.minorticks_on()
test_value = np.array([0.05, 0.1, 0.15, 0.25, 0.3, 0.35, 0.45,
0.5, 0.55, 0.65, 0.7, 0.75, 0.85, 0.9,
0.95, 1, 1.05, 1.1, 1.15, 1.25, 1.3, 1.35])
assert_almost_equal(ax.xaxis.get_ticklocs(minor=True), test_value)
def test_LogLocator():
loc = mticker.LogLocator(numticks=5)
assert_raises(ValueError, loc.tick_values, 0, 1000)
test_value = np.array([1.00000000e-05, 1.00000000e-03, 1.00000000e-01,
1.00000000e+01, 1.00000000e+03, 1.00000000e+05,
1.00000000e+07, 1.000000000e+09])
assert_almost_equal(loc.tick_values(0.001, 1.1e5), test_value)
loc = mticker.LogLocator(base=2)
test_value = np.array([0.5, 1., 2., 4., 8., 16., 32., 64., 128., 256.])
assert_almost_equal(loc.tick_values(1, 100), test_value)
def test_LogFormatterExponent():
class FakeAxis(object):
"""Allow Formatter to be called without having a "full" plot set up."""
def get_view_interval(self):
return 1, 10
i = np.arange(-3, 4, dtype=float)
expected_result = ['-3', '-2', '-1', '0', '1', '2', '3']
for base in [2, 5, 10, np.pi, np.e]:
formatter = mticker.LogFormatterExponent(base=base)
formatter.axis = FakeAxis()
vals = base**i
labels = [formatter(x, pos) for (x, pos) in zip(vals, i)]
nose.tools.assert_equal(labels, expected_result)
# Should be a blank string for non-integer powers if labelOnlyBase=True
formatter = mticker.LogFormatterExponent(base=10, labelOnlyBase=True)
formatter.axis = FakeAxis()
nose.tools.assert_equal(formatter(10**0.1), '')
# Otherwise, non-integer powers should be nicely formatted
locs = np.array([0.1, 0.00001, np.pi, 0.2, -0.2, -0.00001])
i = range(len(locs))
expected_result = ['0.1', '1e-05', '3.14', '0.2', '-0.2', '-1e-05']
for base in [2, 5, 10, np.pi, np.e]:
formatter = mticker.LogFormatterExponent(base, labelOnlyBase=False)
formatter.axis = FakeAxis()
vals = base**locs
labels = [formatter(x, pos) for (x, pos) in zip(vals, i)]
nose.tools.assert_equal(labels, expected_result)
def test_use_offset():
for use_offset in [True, False]:
with matplotlib.rc_context({'axes.formatter.useoffset': use_offset}):
tmp_form = mticker.ScalarFormatter()
nose.tools.assert_equal(use_offset, tmp_form.get_useOffset())
def test_formatstrformatter():
# test % style formatter
tmp_form = mticker.FormatStrFormatter('%05d')
nose.tools.assert_equal('00002', tmp_form(2))
# test str.format() style formatter
tmp_form = mticker.StrMethodFormatter('{x:05d}')
nose.tools.assert_equal('00002', tmp_form(2))
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| lgpl-3.0 |
ilyes14/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
    def _str_signature(self):
        # NOTE: the early return below intentionally disables signature
        # rendering; the branch that follows is unreachable.
        return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
        for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
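# Illustrative sketch (added for exposition): get_doc_object() picks the right
# wrapper for a function, class, or module, and str() renders the parsed
# docstring as Sphinx-flavored reStructuredText. The sample function below is
# hypothetical and exists only to exercise the parser.
def _example_render_docstring():
    def sample(x):
        """
        Square a number.

        Parameters
        ----------
        x : float
            The value to square.

        Returns
        -------
        float
            The squared value.
        """
        return x * x
    doc = get_doc_object(sample)
    return str(doc)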
| bsd-3-clause |
jinzekid/codehub | python/code_snippet/spider/test_ip_address.py | 1 | 6643 | g_ip_html = 'proxy_ip.html'
############################################################
"""
利用工厂模式,生成不同的header信息
"""
class HeaderFactory():
def __init__(self):
self.list_user_agent = [
# For Android
"Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) \
AppleWebKit/535.19 (KHTML, like Gecko) \
Chrome/18.0.1025.166 Safari/535.19",
"Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) \
AppleWebKit/534.30 (KHTML, like Gecko) \
Version/4.0 Mobile Safari/534.30",
"Mozilla/5.0 (Linux; U; Android 2.2; en-gb; GT-P1000 Build/FROYO) \
AppleWebKit/533.1 (KHTML, like Gecko) \
Version/4.0 Mobile Safari/533.1",
# For Firefox
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) \
Gecko/20100101 Firefox/21.0",
"Mozilla/5.0 (Android; Mobile; rv:14.0) \
Gecko/14.0 Firefox/14.0",
# For chrome
"Mozilla/5.0 (Windows NT 6.2; WOW64) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/27.0.1453.94 Safari/537.36",
"Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) \
AppleWebKit/535.19 (KHTML, like Gecko) \
Chrome/18.0.1025.133 Mobile Safari/535.19",
# For iOS
"Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) \
AppleWebKit/534.46 (KHTML, like Gecko) \
Version/5.1 Mobile/9A334 Safari/7534.48.3",
"Mozilla/5.0 (iPod; U; CPU like Mac OS X; en) \
AppleWebKit/420.1 (KHTML, like Gecko) \
Version/3.0 Mobile/3A101a Safari/419.3",
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) \
AppleWebKit/602.1.50 (KHTML, like Gecko) \
CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1"
]
pass
def get_random_user_agent(self):
import random
int_random = random.randint(0,len(self.list_user_agent)-1)
return self.list_user_agent[int_random]
"""把方法当属性使用"""
@property
def header_info(self):
header_default = [
("Accept", "text/html,application/xhtml+xml,\
application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"),
("Accept-Language", "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7"),
("Connection", "keep-alive"),
("referer", ""),
("Accept-Encoding", "utf-8")
]
header_default.append(('User-Agent', self.get_random_user_agent()))
return header_default
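# Illustrative sketch (an addition for exposition, not part of the original
# script): attach a randomized header set from HeaderFactory to a request, the
# same pattern test_useful_ip_address() uses below. The URL is a placeholder.
def _example_header_factory():
    import urllib.request
    req = urllib.request.Request('https://httpbin.org/headers')
    for name, value in HeaderFactory().header_info:
        req.add_header(name, value)
    return req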
############################################################
def get_ip_html():
import urllib.request
url = "http://www.xicidaili.com/wn/1"
headers = ("User-Agent",
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) \
AppleWebKit/602.1.50 (KHTML, like Gecko) \
CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
data = opener.open(url).read()
fhandle = open(g_ip_html , 'wb')
fhandle.write(data)
fhandle.close()
print('>>:finished save ip html....')
############################################################
def parse_ip_html():
print(">>:开始解析ip地址...")
import pandas as pd
with open(g_ip_html, 'r') as f:
data = f.read()
list_ip_address = bs4_paraser(data)
data = pd.DataFrame(list_ip_address, columns=['ip','port','alive'])
test_useful_ip_address(data.sort_values(by='alive' ,ascending=False))
############################################################
def get_alive_minutes(alive):
    # The source site reports proxy lifetimes with Chinese units:
    # 天 = days, 小时 = hours, 分钟 = minutes.
    total_minutes = 0
if alive.find('天') != -1:
str_time = alive.replace('天', '')
total_minutes = int(str_time)*24*60
elif alive.find('小时') != -1:
str_time = alive.replace('小时', '')
total_minutes = int(str_time)*60
else:
str_time = alive.replace('分钟', '')
total_minutes = int(str_time)
return total_minutes
def bs4_paraser(html):
from bs4 import BeautifulSoup
import re
all_values = []
value = {}
soup = BeautifulSoup(html, 'html.parser')
    # Collect the table rows that hold the proxy entries
all_div = soup.find_all('tr')
for row in all_div:
dict_ip_info = {}
all_td = row.find_all('td')
if len(all_td) > 1:
str_ip = re.findall(r"<td>(.+?)</td>",\
str(all_td[1]))[0]
dict_ip_info['ip'] = str_ip
str_port = re.findall(r"<td>(.+?)</td>",\
str(all_td[2]))[0]
dict_ip_info['port'] = int(str_port)
str_alive = re.findall(r"<td>(.+?)</td>",\
str(all_td[len(all_td)-2]))[0]
dict_ip_info['alive'] = get_alive_minutes(str_alive)
all_values.append(dict_ip_info)
return all_values
# Test whether the scraped proxy IP addresses actually work
def test_useful_ip_address(df_ip):
test_url = 'https://httpbin.org/anything/test_ip'
import urllib.request
import time
for index, dict_ip_info in df_ip.iterrows():
print(dict_ip_info['ip'])
ip_address = dict_ip_info['ip']
ip_port = dict_ip_info['port']
proxy = urllib.request.ProxyHandler(\
{'https': '%s:%s' %(ip_address, ip_port)\
})
    # Whether to enable the HTTP debug log
httphd = urllib.request.HTTPHandler(debuglevel=0)
opener = urllib.request.build_opener(proxy, httphd)
    # Install the opener as the global default
urllib.request.install_opener(opener)
    # Build the request and add the headers
req = urllib.request.Request(test_url)
header_infos = HeaderFactory().header_info
for info in header_infos:
list_info = list(info)
req.add_header(list_info[0], list_info[1])
try:
data = urllib.request.urlopen(req,timeout=6).read().decode('utf-8')
if data is not None:
print(data)
print('good ip address:' + ip_address)
print('================================')
except Exception as e:
            print('Request timed out or failed... ' + str(e))
time.sleep(2)
# proxy = urllib.request.ProxyHandler({'http': test_url})
# # 是否开启DebugLog
# httphd = urllib.request.HTTPHandler(debuglevel=0)
# opener = urllib.request.build_opener(proxy, httphd)
'''############################################################'''
# get_ip_html()
parse_ip_html()
| gpl-3.0 |
akionakamura/scikit-learn | sklearn/tests/test_dummy.py | 129 | 17774 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
pv/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
The decision regions of the three base classifiers and of the soft-voting
ensemble are shown side by side for comparison.
First, three example classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier`
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
GeraldLoeffler/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
Fltk.FL_Shift_R:'shift',
Fltk.FL_Shift_L:'shift',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
65515:'win',
65516:'win',
}
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
    This keeps the interactive Python session usable while figures are shown
Warning: does not work under windows
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
    # mainloop; if an fltk program already exists there is no need to call it
    # threaded (and interactive) version
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
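# Illustrative sketch (an assumption for exposition, not part of the original
# backend): create a figure through this backend, draw into it, and enter the
# FLTK event loop.
def _example_new_figure_manager():
    manager = new_figure_manager(1, figsize=(4, 3))
    axes = manager.canvas.figure.gca()
    axes.plot([0, 1, 2], [0, 1, 4])
    show()  # enters the FLTK main loop until all windows are closed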
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
except:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
| agpl-3.0 |
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 18 | 5287 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
zakkum42/Bosch | src/04-model/bosch_station_autoencoder.py | 1 | 5077 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import os.path
from include.dataset_fnames import generate_station_data_fname, generate_data_fname, generate_response_data_fname
from include.feature_lists import numeric_features, numeric_missing_features_list, numeric_features_to_work_on, categoric_features
import theano
import theano.tensor as T
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model
from keras.callbacks import Callback
#from keras.losses import mean_squared_error
from BoschNNModel1 import BoschNNModel, BoschNNInput, BoschNNOutput, BoschNNCallback
def load_station_df(station_id, use_time=False, use_null_tracker=False, use_category=False, use_prev_station=False, use_product_id=False, use_response=False):
fname = generate_station_data_fname(station_id, sample_type='train', data_type='numeric', use_product=False, allow_nan_values=False)
print fname
station_features = numeric_features[station_id]
features = ['Id'] + station_features
if use_time:
features = features + ['time']
station_df = pd.read_csv(fname, usecols=features, index_col=['Id'])
if use_null_tracker:
for feature in station_features:
new_column = feature + '_isnull'
null_indices = station_df[station_df[feature].isnull()].index
station_df[new_column] = 0
station_df.loc[null_indices, new_column] = 1
if use_product_id:
fname2 = generate_station_data_fname(station_id, sample_type='train', data_type='numeric', use_product=True, allow_nan_values=False)
print fname2
features = ['Id'] + ['product']
station_df2 = pd.read_csv(fname2, usecols=features, index_col=['Id'], dtype=object)
station_df['product'] = station_df2['product']
# print station_df.head()
if use_category:
fname2 = generate_station_data_fname(station_id, sample_type='train', data_type='categorical', use_product=False, allow_nan_values=False)
print fname2
station_features = categoric_features[station_id]
features = ['Id'] + station_features
station_df2 = pd.read_csv(fname2, usecols=features, index_col=['Id'], dtype=object)
# print station_df2.head()
indices = station_df.index
station_df = pd.concat([station_df, station_df2], axis=1)
station_df = station_df.loc[indices]
station_df = pd.get_dummies(station_df)
fname = generate_response_data_fname()
field_list = ['Id'] + ['Response']
response_df = pd.read_csv(fname, usecols=field_list, index_col=['Id'], dtype='int')
indices = station_df.index
response_df = response_df.loc[indices]
if use_response:
station_df = pd.concat([station_df, response_df], axis=1)
return station_df, response_df
def my_mean_squared_error(y_true, y_pred):
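    # Masked MSE: positions where the target is NaN (missing sensor readings)
    # are dropped so they do not contribute to the reconstruction loss.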
mask = T.invert(T.isnan(y_true))
y_true = y_true[mask.nonzero()]
y_pred = y_pred[mask.nonzero()]
return K.mean(K.square(y_pred - y_true), axis=-1)
def create_autoencoder_model(station_id):
print "Station:", station_id
station_df, response_df = load_station_df(station_id)
rows = station_df.shape[0]
cols = station_df.shape[1]
print rows, cols
# station_df = station_df.sample(frac=1.0)
X_train = station_df.values[:-100000]
X_test = station_df.values[-100000:]
boschAutoencoderModel = BoschNNModel()
# Create Input Layer
inputLayer = BoschNNInput(boschAutoencoderModel, 2*cols, input_shape=(cols,), name="first", activation='tanh')
# Create Output Layer
outputLayer = BoschNNOutput(boschAutoencoderModel, cols, name='last', activation='tanh')
# Add layers to model
boschAutoencoderModel.add(inputLayer)
boschAutoencoderModel.add(Dense(32, name='middle1', activation='tanh'))
boschAutoencoderModel.add(Dense(16, name='middle2', activation='tanh'))
boschAutoencoderModel.add(Dense(10, name='middle3', activation='tanh'))
boschAutoencoderModel.add(Dense(16, name='middle4', activation='tanh'))
boschAutoencoderModel.add(Dense(32, name='middle5', activation='tanh'))
boschAutoencoderModel.add(outputLayer)
# Compile model
boschAutoencoderModel.compile(optimizer='adadelta', loss=my_mean_squared_error)
# this is what we have in the model
boschAutoencoderModel.summary()
# Initialize Callback for weight processing
boschNNCallback = BoschNNCallback()
# boschModel.fit(X, y, epochs=10, batch_size=1, shuffle=False, verbose=True, callbacks=[boschNNCallback])
boschAutoencoderModel.fit(X_train, X_train,
epochs=5,
batch_size=1,
shuffle=False,
callbacks=[boschNNCallback],
validation_data=(X_test, X_test))
return
if __name__ == '__main__':
for station_id in ['L1S24']: #sorted(numeric_features):
create_autoencoder_model(station_id)
| apache-2.0 |
dylanGeng/BuildingMachineLearningSystemsWithPython | ch09/utils.py | 24 | 5568 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import sys
from matplotlib import pylab
import numpy as np
DATA_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data")
CHART_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "charts")
for d in [DATA_DIR, CHART_DIR]:
if not os.path.exists(d):
os.mkdir(d)
# Put your directory to the different music genres here
GENRE_DIR = None
GENRE_LIST = ["classical", "jazz", "country", "pop", "rock", "metal"]
# Put your directory to the test dir here
TEST_DIR = None
if GENRE_DIR is None or TEST_DIR is None:
print("Please set GENRE_DIR and TEST_DIR in utils.py")
sys.exit(1)
def plot_confusion_matrix(cm, genre_list, name, title):
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
ax = pylab.axes()
ax.set_xticks(range(len(genre_list)))
ax.set_xticklabels(genre_list)
ax.xaxis.set_ticks_position("bottom")
ax.set_yticks(range(len(genre_list)))
ax.set_yticklabels(genre_list)
pylab.title(title)
pylab.colorbar()
pylab.grid(False)
pylab.show()
pylab.xlabel('Predicted class')
pylab.ylabel('True class')
pylab.grid(False)
pylab.savefig(
os.path.join(CHART_DIR, "confusion_matrix_%s.png" % name), bbox_inches="tight")
def plot_pr(auc_score, name, precision, recall, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.fill_between(recall, precision, alpha=0.5)
pylab.plot(recall, precision, lw=1)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('Recall')
pylab.ylabel('Precision')
pylab.title('P/R curve (AUC = %0.2f) / %s' % (auc_score, label))
filename = name.replace(" ", "_")
pylab.savefig(
os.path.join(CHART_DIR, "pr_" + filename + ".png"), bbox_inches="tight")
def plot_roc(auc_score, name, tpr, fpr, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.plot([0, 1], [0, 1], 'k--')
pylab.plot(fpr, tpr)
pylab.fill_between(fpr, tpr, alpha=0.5)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('False Positive Rate')
pylab.ylabel('True Positive Rate')
pylab.title('ROC curve (AUC = %0.2f) / %s' %
(auc_score, label), verticalalignment="bottom")
pylab.legend(loc="lower right")
filename = name.replace(" ", "_")
pylab.savefig(
os.path.join(CHART_DIR, "roc_" + filename + ".png"), bbox_inches="tight")
def show_most_informative_features(vectorizer, clf, n=20):
c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
top = zip(c_f[:n], c_f[:-(n + 1):-1])
for (c1, f1), (c2, f2) in top:
print("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
def plot_log():
pylab.clf()
x = np.arange(0.001, 1, 0.001)
y = np.log(x)
pylab.title('Relationship between probabilities and their logarithm')
pylab.plot(x, y)
pylab.grid(True)
pylab.xlabel('P')
pylab.ylabel('log(P)')
filename = 'log_probs.png'
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_feat_importance(feature_names, clf, name):
pylab.clf()
coef_ = clf.coef_
important = np.argsort(np.absolute(coef_.ravel()))
f_imp = feature_names[important]
coef = coef_.ravel()[important]
inds = np.argsort(coef)
f_imp = f_imp[inds]
coef = coef[inds]
xpos = np.array(range(len(coef)))
pylab.bar(xpos, coef, width=1)
pylab.title('Feature importance for %s' % (name))
ax = pylab.gca()
ax.set_xticks(np.arange(len(coef)))
labels = ax.set_xticklabels(f_imp)
for label in labels:
label.set_rotation(90)
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(
CHART_DIR, "feat_imp_%s.png" % filename), bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
pylab.clf()
    num_rows = 1 + (len(data_name_list) - 1) // 2
num_cols = 1 if len(data_name_list) == 1 else 2
pylab.figure(figsize=(5 * num_cols, 4 * num_rows))
for i in range(num_rows):
for j in range(num_cols):
pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
x, name = data_name_list[i * num_cols + j]
pylab.title(name)
pylab.xlabel('Value')
pylab.ylabel('Density')
# the histogram of the data
max_val = np.max(x)
if max_val <= 1.0:
bins = 50
elif max_val > 50:
bins = 50
else:
                bins = int(max_val)
n, bins, patches = pylab.hist(
x, bins=bins, normed=1, facecolor='green', alpha=0.75)
pylab.grid(True)
if not filename:
filename = "feat_hist_%s.png" % name
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name):
pylab.clf()
pylab.ylim([0.0, 1.0])
pylab.xlabel('Data set size')
pylab.ylabel('Error')
pylab.title("Bias-Variance for '%s'" % name)
pylab.plot(
data_sizes, train_errors, "-", data_sizes, test_errors, "--", lw=1)
pylab.legend(["train error", "test error"], loc="upper right")
pylab.grid(True)
pylab.savefig(os.path.join(CHART_DIR, "bv_" + name + ".png"))
| mit |
daniel-severo/dask-ml | dask_ml/base.py | 1 | 3188 | import os
import six
from abc import ABCMeta
import numpy as np
import dask
from dask.array import learn
class _WritableDoc(ABCMeta):
"""In py27, classes inheriting from `object` do not have
    a mutable __doc__.
We inherit from ABCMeta instead of type to avoid metaclass
conflicts, since some sklearn estimators (eventually) subclass
ABCMeta
"""
# TODO: Py2: remove all this
@six.add_metaclass(_WritableDoc)
class _BigPartialFitMixin(object):
""" Wraps a partial_fit enabled estimator for use with Dask arrays """
_init_kwargs = []
_fit_kwargs = []
def __init__(self, **kwargs):
missing = set(self._init_kwargs) - set(kwargs)
if missing:
raise TypeError("{} requires the keyword arguments {}".format(
type(self), missing)
)
for kwarg in self._init_kwargs:
setattr(self, kwarg, kwargs.pop(kwarg))
super(_BigPartialFitMixin, self).__init__(**kwargs)
@classmethod
def _get_param_names(cls):
# Evil hack to make sure repr, get_params work
# We could also try rewriting __init__ once the class is created
bases = cls.mro()
# walk bases until you hit an sklearn class.
for base in bases:
if base.__module__.startswith("sklearn"):
break
# merge the inits
my_init = cls._init_kwargs
their_init = base._get_param_names()
return my_init + their_init
def fit(self, X, y=None, get=None):
if get is None:
get = dask.threaded.get
fit_kwargs = {k: getattr(self, k) for k in self._fit_kwargs}
result = learn.fit(self, X, y, get=get, **fit_kwargs)
# Copy the learned attributes over to self
# It should go without saying that this is *not* threadsafe
attrs = {k: v for k, v in vars(result).items() if k.endswith('_')}
for k, v in attrs.items():
setattr(self, k, v)
return self
def predict(self, X, dtype=None):
predict = super(_BigPartialFitMixin, self).predict
if dtype is None:
dtype = self._get_predict_dtype(X)
return X.map_blocks(predict, dtype=dtype, drop_axis=1)
def _get_predict_dtype(self, X):
xx = np.zeros((1, X.shape[1]), dtype=X.dtype)
return super(_BigPartialFitMixin, self).predict(xx).dtype
def _copy_partial_doc(cls):
for base in cls.mro():
if base.__module__.startswith('sklearn'):
break
lines = base.__doc__.split(os.linesep)
header, rest = lines[0], lines[1:]
insert = """
This class wraps scikit-learn's {classname}. When a dask-array is passed
to our ``fit`` method, the array is passed block-wise to the scikit-learn
class' ``partial_fit`` method. This will allow you to fit the estimator
    on larger-than-memory datasets sequentially (block-wise), but without any
parallelism, or any ability to distribute across a cluster.""".format(
classname=base.__name__)
doc = '\n'.join([header + insert] + rest)
cls.__doc__ = doc
return cls
__all__ = [
'_BigPartialFitMixin',
'_copy_partial_doc',
]
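# Hypothetical usage sketch (added for illustration; not part of this module).
# It assumes an estimator exposing ``partial_fit``, here
# ``sklearn.linear_model.SGDClassifier``; the subclass name and the choice of
# ``_init_kwargs``/``_fit_kwargs`` below are illustrative assumptions, not
# library code.
if __name__ == '__main__':  # pragma: no cover
    from sklearn.linear_model import SGDClassifier

    @_copy_partial_doc
    class BigSGDClassifier(_BigPartialFitMixin, SGDClassifier):
        _init_kwargs = ['classes']
        _fit_kwargs = ['classes']

    print(BigSGDClassifier(classes=[0, 1]))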
| bsd-3-clause |
joetrollo/thinkful-library | linear-regression/workshop notes.py | 1 | 17557 |
# coding: utf-8
# This workshop walks through the assignment for the accompanying course, [Linear Regression and a Space Scam](https://projects.thinkful.com/linear-regression-and-a-space-scam-476), focusing on the background behind linear regression and performing hypothesis tests manually.
#
# The objective was to use linear regression to fit the following two equations to our data, and then determine which of them is best. First, we assume only one radioactive isotope is present, in which case our equation is $\Delta N(t)=A r_1 e^{-r_1 t}+C$, and then we test a second form that includes a second isotope, $\Delta N(t)=A r_1 e^{-r_1 t}+B r_2 e^{-r_2 t}+C$. Here, $A$ and $B$ represent the relative amount of each isotope present in the initial fuel sample, and $C$ is the level of background radiation. The decay rates $r_1$ and $r_2$ are 0.4 and 0.1 respectively.
# In[1]:
import numpy as np
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
get_ipython().magic('matplotlib inline')
# The following cell sets some default styling for our plots, based both on personal preferences and knowing that larger plots are easier to see in screen-shares. Additionally, I'm suppressing some warnings that arise because I'm using the latest version of Python.
# In[2]:
matplotlib.rcParams.update({'font.size': 22,
'font.family': 'serif',
'figure.figsize': (12,8),
'text.usetex': True,
'lines.linewidth': 2})
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
# It will be useful to define our constants and a function representing the decay of an isotope with decay rate $r$ at time $t$.
# In[3]:
r1, r2 = 0.4, 0.1
decay = lambda t, r: r*np.exp(-r*t)
# The data are hosted in GitHub and can be directly downloaded with pandas. Additionally, I'm going to extract the columns into individual NumPy arrays for brevity and manual manipulation later.
# In[4]:
d = pd.read_csv('https://github.com/joetrollo/thinkful-library/'
'raw/master/linear-regression/engine.csv')
t, N = d.values.T
# Before we begin our analysis, it's worth the few minutes to plot the data and see what we're dealing with.
# In[5]:
plt.scatter(t, N, color='r', edgecolor='', s=50)
plt.xlabel('$t$')
plt.ylabel('$\Delta N$')
plt.show()
# Some introductions to linear regression don't make it clear that, even though the above plot is curved, the model still falls within the realm of "linear". Let's review a basic example of linear regression and then generalize it for more applications. Consider the following plot of randomly generated data, following the equation $y=mx+b+\varepsilon$:
# In[6]:
n = 50 # number of data points
m = 0.5 # slope
b = -1 # intercept
e = 0.5 # intensity of noise
# We'll generate n x-values
x = np.random.uniform(-2, 8, size=n)
# Then the corresponding y-values
y = m*x + b
# And add random noise to the output
y += np.random.normal(scale=e, size=n)
x2 = np.r_[-2, 8]
plt.plot(x2, m*x2 + b, 'k-', zorder=-10)
plt.gca().set_aspect('equal')
plt.scatter(x, y, c='r', edgecolor='', s=50)
plt.xlim(-3, 9)
plt.ylim(-3, 4)
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.show()
# The particular form of linear regression we'll be using here is "ordinary least squares" (OLS), in which we attempt to find estimates of the parameters $m$ and $b$, denoted by $\hat m$ and $\hat b$, that minimize the total squared distance between the model output $\hat y$ and the observed output $y$. This sum of squared residuals (SSR) can be written as
#
# $$
# \mathrm{SSR} = \sum_{i=1}^{n} (y_i - \hat y_i)^2 = \sum_{i=1}^{n} (y_i - \hat m x_i - \hat b)^2
# $$
#
# Note that this method minimizes the _vertical_ distance between the model and the points, not the orthogonal distance (which is called "total least squares"). The assumption we make when using OLS is that the noise $\varepsilon$ affects the model's output only—not the input variables. You can compare the two methods in the following plots:
# In[7]:
plt.figure(figsize=(12, 8))
axs = [plt.subplot(121), plt.subplot(122)]
for ax in axs:
ax.set_aspect('equal')
ax.scatter(x, y, c='r', edgecolor='', zorder=10)
ax.set_xlim(-3, 9)
ax.set_ylim(-3, 4)
ax.plot([-3, 9], [-3*m+b, 9*m+b], 'k-')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
axs[0].set_title('Ordinary least squares')
axs[1].set_title('Total least squares')
for i, j in zip(x, y):
axs[0].plot([i, i], [j, m*i+b], 'k-', lw=1)
for i, j in zip(x, y):
axs[1].plot([i, (i+m*j-m*b)/(m*m + 1)],
[j, m*(i +m*j-m*b)/(m*m+1) + b], 'k-', lw=1)
plt.show()
# The previous equation for the SSR can be solved for $\hat m$ and $\hat b$ in that expanded form, but it becomes cumbersome when we include additional variables. Instead, we'll represent this equation in matrix form. We'll put our $y_i$ into a column,
#
# $$
# Y = \left[ \begin{array}{c} y_1 \\ y_2 \\ \vdots \\ y_n \end{array} \right]
# $$
#
# and our inputs $x_i$ into a matrix with a column representing our constant term,
#
# $$
# X = \left[ \begin{array}{cc} x_1 & 1 \\ x_2 & 1 \\ \vdots & \vdots \\ x_n & 1 \end{array} \right].
# $$
#
# This matrix of input $X$ is called the [_design matrix_](https://en.wikipedia.org/wiki/Design_matrix) for our model. Finally, our parameters $m$ and $b$ will go into a matrix of their own,
#
# $$
# \beta = \left[ \begin{array}{c} m \\ b \end{array} \right].
# $$
#
# Notice that the [product](https://en.wikipedia.org/wiki/Matrix_multiplication) of $X$ and $\beta$ gives us
#
# $$
# X \cdot \beta = \left[ \begin{array}{cc} x_1 & 1 \\ x_2 & 1 \\ \vdots & \vdots \\ x_n & 1 \end{array} \right] \cdot \left[ \begin{array}{c} m \\ b \end{array} \right] = \left[ \begin{array}{c} m x_1 + b \\ m x_2 + b \\ \vdots \\ m x_n + b \end{array} \right],
# $$
#
# and so our model can be represented in a much shorter equation, $Y = X\beta + \varepsilon$, regardless of how many input variables we have. Now, we're aiming to find the parameters $\hat \beta$ that minimize the SSR, whose new form is
#
# $$
# \mathrm{SSR} = (Y-X\hat\beta)^T(Y-X\hat\beta)
# $$
#
# where $T$ represents the matrix transpose. If we differentiate the equation with respect to $\hat\beta$ and set it equal to zero, we find that the SSR is at a minimum when $\hat\beta = (X^TX)^{-1}X^TY$. Let's calculate this value for our example above and compare it to the true values. This code is written in Python 3.5, which [introduced a dedicated operator](https://www.python.org/dev/peps/pep-0465/) for matrix multiplication, `@`. In case you're using an older version of Python, all the following statements are equivalent:
#
# ```
# C = A @ B
# C = A.dot(B)
# C = np.dot(A, B)
# ```
# Also note that [`np.c_[]`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.c_.html) is used here as shorthand for `np.colstack()`. See also the documentation for [`np.r_[]`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.r_.html) which has better examples.
# In[8]:
# Create the design matrix
X = np.c_[x, np.ones_like(x)]
# Evaluate the matrix equation
print(np.linalg.inv(X.T @ X) @ X.T @ y)
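# Added for comparison (not in the original notebook): the same estimate
# computed without explicitly forming the inverse.
print(np.linalg.solve(X.T @ X, X.T @ y))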
# These values are in close agreement with what was originally used to generate the data. Going forward, it's preferable to use `np.linalg.solve(A, B)` in place of `np.linalg.inv(A) @ B`, as it is more numerically stable.
#
# Let's now consider our primary problem of modeling $\Delta N(t)=A r_1 e^{-r_1 t}+C$. Because we are only interested in estimating $A$ and $C$, we can isolate $r_1 e^{-r_1 t}$ and turn it into a single variable instead of a combination of variables and constants. Our design matrix and parameter matrix now become
#
# $$
# X \cdot \beta = \left[ \begin{array}{cc} r_1 e^{-r_1 t_1} & 1 \\ r_1 e^{-r_1 t_2} & 1 \\ \vdots & \vdots \\ r_1 e^{-r_1 t_n} & 1 \end{array} \right] \cdot \left[ \begin{array}{c} A \\ C \end{array} \right]
# $$
#
# With our matrix equations, we can quickly calculate the parameters for this first model:
# In[9]:
X = np.c_[decay(t, r1), np.ones_like(t)]
B = np.linalg.solve(X.T @ X, X.T @ N)
print(B)
# Instead of our first plot that showed $\Delta N$ against $t$, let's plot $\Delta N$ against the first column of the design matrix, and a line using the parameters we just calculated. Remember that $\hat Y = X \hat B$, which equates to `Y = X @ B`.
# In[10]:
plt.scatter(X[:,0], N, color='r', edgecolor='', s=50)
plt.plot(X[:,0], X @ B, 'k-')
plt.xlabel('$r_1 e^{-r_1 t}$')
plt.ylabel('$\Delta N$')
plt.show()
# The transformed data also resemble a line, matching our intuition of what it means to be "linear" in the context of linear regression. The power of the matrix equation we used becomes evident when we attempt to repeat the process for the second model, $\Delta N(t)=A r_1 e^{-r_1 t}+B r_2 e^{-r_2 t}+C$. We just create the new design matrix and use the same equation:
# In[11]:
X = np.c_[decay(t, r1), decay(t, r2), np.ones_like(t)]
B = np.linalg.solve(X.T @ X, X.T @ N)
print(B)
# Let's visualize the data again, this time using two columns and plotting in three-dimensional space, adding in a plane defined by the parameters we just found:
# In[12]:
from mpl_toolkits.mplot3d import Axes3D
x = np.linspace(-0.01, 0.06)
y = np.linspace(0.02, 0.07)
x, y = np.meshgrid(x, y)
z = B[0]*x + B[1]*y + B[2]
for r in np.arange(45, 180, 45):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:,0], X[:,1], N, c='r', edgecolor='')
ax.plot_surface(x, y, z, color='none')
ax.azim -= r
ax.set_xlabel('$r_1 e^{-r_1 t}$')
ax.set_ylabel('$r_2 e^{-r_2 t}$')
ax.set_zlabel('$\Delta N$')
plt.show()
# In higher dimensions, the data now lie on a plane. When performing linear regression, it's important to know that "linear" refers to our model being linear in the _parameters_, and does not impose any constraints on the variables we use. Mathematically, the matrix equations are all about lines and (hyper)planes, but the variables can be transformed any way we want before building the design matrix.
#
# While these plots and equations have hopefully helped you to understand how linear regression works, you will likely never need to work with the raw matrices. Now let's run the same analysis using `statsmodels`. In particular, we'll be using the API from the `statsmodels.formula` module, which allows us to describe models as string equations. If we were using `statsmodels.api` directly, we'd have to create the design matrix manually.
#
# The basic form for the `formula` API equations is simply `y ~ x1 + x2 + ... + xn`. Here, `y` is the output we wish to model and the `xi` are input variables. However, the API lets us use a `pandas.DataFrame` as input and reference variables by the column name. We can also run the variables through other functions, all in a single string. Note that the intercept is implicit and does not need to be included in your input equation. You can find more [examples](http://statsmodels.sourceforge.net/devel/example_formulas.html) in the `statsmodels` documentation.
# In[13]:
from statsmodels.formula import api as smf
m1 = smf.ols('N ~ decay(t, r1)', data=d).fit()
print(m1.summary())
# This seems like a great fit, with a high [$R^2$](https://en.wikipedia.org/wiki/Coefficient_of_determination) and small $p$-values for the coefficients. Remember, these $p$-values are testing the null hypothesis that a coefficient is equal to 0, so a low $p$-value means that, if the true coefficient were 0, an estimate this far from 0 would be very unlikely to arise by chance.
#
# Let's plot the fit and inspect it visually.
# In[14]:
plt.scatter(t, N, color='r', edgecolor='', s=50)
plt.xlabel('$t$')
plt.ylabel('$\Delta N$')
plt.plot(t, m1.predict(), 'k-')
plt.show()
# This seems like an excellent fit, and if we didn't suspect that there might be an extra term in the model, we might have stopped here. Let's create the second model and see how it compares.
# In[15]:
m2 = smf.ols('N ~ decay(t, r1) + decay(t, r2)', data=d).fit()
print(m2.summary())
# This second models also has a high coefficient of determination, but not much higher. Let's compare them visually:
# In[16]:
plt.scatter(t, N, color='r', edgecolor='', s=50)
plt.xlabel('$t$')
plt.ylabel('$\Delta N$')
plt.plot(t, m1.predict(), 'k--', lw=1, label='One isotope')
plt.plot(t, m2.predict(), 'k-', label='Two isotopes')
plt.legend()
plt.show()
# The second model looks slightly better, but this slight visual difference isn't sufficient justification for choosing one over the other. The question is now: how can we objectively assess whether the two-isotope model is a better description of the data?
#
# There are multiple ways to answer this question, but for this exercise we're going to use the [likelihood-ratio test](https://en.wikipedia.org/wiki/Likelihood-ratio_test). This test will tell us how much more likely the data are in the new/alternative model compared to the first/null model, as long as the models are nested. (A pair of models is nested if one model is a special case of the other, e.g., our first model is a special case of the second model where we set $B=0$.) The test statistic that we need to perform this inference is
#
# $$
# D = -2 \ln \left( \frac{\mathcal L}{\mathcal L ^ \prime} \right) = 2 (\ln\mathcal L^\prime - \ln\mathcal L)
# $$
#
# where $\mathcal L$ and $\mathcal L^\prime$ are the likelihood of the null and alternative models respectively. One of the important assumptions of linear regression is that the noise on $Y$ is normally distributed with a mean of 0. Based on the likelihood function of the normal distribution and the data in our model, `statsmodels` provides us with the log-likelihoods in the `llf` attribute of the model, which we can then use to calculate $D$.
# In[17]:
D = 2 * (m2.llf - m1.llf)
print(D)
# On its own, this value doesn't mean much to us. However, because the two models are nested, we know (by Wilks' theorem) that this test statistic will approximately follow a chi-squared distribution with the degrees of freedom equal to the difference in the degrees of freedom of the two models. In most cases, this difference is just equal to the difference in the number of parameters of the models. In our case, the statistic $D$ would follow a chi-squared distribution with one degree of freedom.
#
# In the context of the chi-squared distribution, we can ask ourselves: what's the probability of attaining a value of $D$ or higher by chance? While many statistical tests in SciPy automatically provide the associated $p$-values, the likelihood-ratio test is not part of the package, so we'll have to calculate this ourselves. Let's begin by plotting the distribution, and letting $D=1$ for illustration purposes.
# In[18]:
from scipy import stats
D = 1
dof = 1 # degrees of freedom
x = np.linspace(0, 2, 200)
y = stats.chi2.pdf(x, dof)
plt.plot(x, y, 'k-')
plt.vlines(D, 0, stats.chi2.pdf(D, dof), color='r')
plt.ylim(0, 3)
plt.xlabel('$x$')
plt.ylabel('$f_1(x)$')
plt.show()
# The probability of attaining a value of $D$ or higher is equal to the integral of the distribution from $D$ to infinity, i.e.,
#
# $$
# p = \int_D^\infty f_1(x)dx
# $$
#
# which visually corresponds to the hatched area in this next plot:
# In[19]:
x2 = np.linspace(D, 2)
y2 = stats.chi2.pdf(x2, 1)
plt.plot(x, y, 'k-')
plt.vlines(D, 0, stats.chi2.pdf(D, 1), color='r')
plt.fill_between(x2, y2, facecolor='', hatch='//', edgecolor='r')
plt.ylim(0, 3)
plt.xlabel('$x$')
plt.ylabel('$f_1(x)$')
plt.show()
# This integral is called the [_survival function_](https://en.wikipedia.org/wiki/Survival_function), and is conveniently provided by the `sf()` method of the distribution. (The survival function is just the complement of the [cumulative distribution function](https://en.wikipedia.org/wiki/Cumulative_distribution_function) $CDF(x)$, i.e., $SF(x) = 1 - CDF(x)$.) Restoring the former value of $D$, we can easily find the associated $p$-value:
# In[20]:
D = 2*(m2.llf - m1.llf)
p = stats.chi2.sf(D, dof)
print(p)
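# Quick equivalence check (added; not in the original notebook): the survival
# function is simply one minus the CDF.
print(np.isclose(p, 1 - stats.chi2.cdf(D, dof)))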
# The likelihood-ratio test has tested the null hypothesis that the data we observe are no more likely under the second model than the first, and this small $p$-value tells us that an improvement in likelihood this large would be very unlikely to arise by chance if the one-isotope model were true. We can thus conclude that there are two isotopes in the fuel sample.
#
# The remainder of the exercise requests that we determine what percentage of the initial fuel is contaminated, by calculating the ratio $P=B/(A+B)$, and also determine the associated uncertainty for this percentage. The parameters of the model and their associated uncertainties are available in the `params` and `bse` attributes, respectively, so we can extract them to perform our final calculation. The formula for the final uncertainty can be found by propagating uncertainty through the equation $P=B/(A+B)$, combining the second and fifth equations in [this table](https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas).
# In[21]:
C, A, B = m2.params
# The variance is the square of the standard error
vC, vA, vB = m2.bse**2
P = B/(A+B)
vP = P**2 * ( vB/(B**2) + (vA+vB)/((A+B)**2) )  # relative variances of the numerator B and the denominator A+B
print(P, np.sqrt(vP))
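# Added cross-check (not in the original notebook): propagating the uncertainty
# through P = B/(A+B) with partial derivatives, assuming A and B independent,
# gives var(P) = (B**2*vA + A**2*vB)/(A+B)**4.
vP_exact = (B**2 * vA + A**2 * vB) / (A + B)**4
print(P, np.sqrt(vP_exact))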
# While the estimated percent of contaminated fuel, 19.5%, is less than the 20% upper limit, given the uncertainty of 1.3% it isn't unreasonable for the true contamination to have exceeded 20%.
| gpl-3.0 |
mzwiessele/GPyNotebook | GPyNotebook/plotting/plot.py | 1 | 2160 | '''
Created on Mar 27, 2015
@author: maxz
'''
from IPython.html.widgets import HTML
import matplotlib.pyplot as plt, numpy as np
from IPython.core.pylabtools import print_figure
from matplotlib.axis import Axis
class Plot(HTML):
def __init__(self, figsize=None, figtype=None, *args, **kwargs):
"""
A simple plot, holding a figure and an axis, freely adjustable.
Properties:
ax: the axis object to draw in
fig: the figure corresponding to the axis
draw(): render the figure in its current state
"""
super(Plot, self).__init__(*args, **kwargs)
fig, ax = plt.subplots(figsize=figsize)
self.figsize = fig.get_size_inches()
self.ax = ax
self.fig = ax.figure
plt.close(fig)
self.figtype = figtype or 'svg'
def draw(self):
"""
Render the updated plot to the HTML output.
"""
self.value = print_figure(self.fig, self.figtype)
class LabelDictPlot(Plot):
def __init__(self, lab_dict, figsize=None, figtype=None, *a, **k):
"""
        A plot with label support. If lab_dict is a plain collection of labels rather than a dict of label sets, it is wrapped as a single 'default' label set.
"""
super(LabelDictPlot, self).__init__(figsize, figtype, *a, **k)
if not isinstance(lab_dict, dict):
lab_dict = dict(default=lab_dict)
self._lab_dict = lab_dict
        self._update_to_name(list(lab_dict.keys())[0])
def _update_to_name(self, new):
self.labels = self._lab_dict[new]
self.ulabels = np.unique(self.labels)
def labels_updated(self):
"""
This function needs to be overwritten by inheriting classes.
It makes sure the plot gets updated, and calls self.draw() when finished updating the plot.
"""
raise NotImplementedError('Abstract super class.')
def change_labels(self, name, old, new):
"""
trait update function, to be called with the dictionary key of which the labels should be updated to.
"""
self._update_to_name(new)
self.labels_updated() | bsd-2-clause |
wsriley/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
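        # Stretch the test-error curve to one value per training batch: each
        # test measurement is repeated testing_freq times, then the curve is
        # padded with its last value and trimmed to the training-curve length.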
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
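        # Assemble the filters into a single montage image ("bigpic") with a
        # one-pixel border between tiles; channels are laid out side by side
        # unless they are being combined into a single RGB image.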
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
PaddlePaddle/models | PaddleAudio/examples/audioset_training/evaluate.py | 1 | 3489 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import numpy as np
import paddle
import paddle.nn.functional as F
import paddleaudio as pa
import yaml
from dataset import get_val_loader
from model import resnet50
from paddle.utils import download
from sklearn.metrics import average_precision_score, roc_auc_score
from utils import compute_dprime, download_assets
checkpoint_url = 'https://bj.bcebos.com/paddleaudio/paddleaudio/resnet50_weight_averaging_mAP0.416.pdparams'
def evaluate(epoch, val_loader, model, loss_fn):
model.eval()
avg_loss = 0.0
all_labels = []
all_preds = []
for batch_id, (x, y) in enumerate(val_loader()):
x = x.unsqueeze((1))
label = y
logits = model(x)
loss_val = loss_fn(logits, label)
pred = F.sigmoid(logits)
all_labels += [label.numpy()]
all_preds += [pred.numpy()]
avg_loss = (avg_loss * batch_id + loss_val.numpy()[0]) / (1 + batch_id)
msg = f'eval epoch:{epoch}, batch:{batch_id}'
msg += f'|{len(val_loader)}'
msg += f',loss:{avg_loss:.3}'
if batch_id % 20 == 0:
print(msg)
all_preds = np.concatenate(all_preds, 0)
all_labels = np.concatenate(all_labels, 0)
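    # Macro-average the per-class average precision and ROC-AUC over all
    # classes; d-prime is derived from the mean AUC.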
mAP_score = np.mean(
average_precision_score(all_labels, all_preds, average=None))
auc_score = np.mean(roc_auc_score(all_labels, all_preds, average=None))
dprime = compute_dprime(auc_score)
return avg_loss, mAP_score, auc_score, dprime
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Audioset inference')
parser.add_argument('--config',
type=str,
required=False,
default='./assets/config.yaml')
parser.add_argument('--device',
help='set the gpu device number',
type=int,
required=False,
default=0)
parser.add_argument('--weight', type=str, required=False, default='')
args = parser.parse_args()
download_assets()
with open(args.config) as f:
c = yaml.safe_load(f)
paddle.set_device('gpu:{}'.format(args.device))
ModelClass = eval(c['model_type'])
model = ModelClass(pretrained=False,
num_classes=c['num_classes'],
dropout=c['dropout'])
if args.weight.strip() == '':
print(f'Using pretrained weight: {checkpoint_url}')
args.weight = download.get_weights_path_from_url(checkpoint_url)
model.load_dict(paddle.load(args.weight))
model.eval()
val_loader = get_val_loader(c)
print(f'Evaluating...')
avg_loss, mAP_score, auc_score, dprime = evaluate(
0, val_loader, model, F.binary_cross_entropy_with_logits)
print(f'mAP: {mAP_score:.3}')
print(f'auc: {auc_score:.3}')
print(f'd-prime: {dprime:.3}')
| apache-2.0 |
LiuVII/Machine_learning_and_AI | Bag_of_Words/log_regression.py | 1 | 6731 | from __future__ import print_function
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer
import math
import itertools
from collections import Counter
from sklearn.metrics import auc, roc_auc_score
import matplotlib.pyplot as plt
MAX_FEAT = 10000
NUM_CLASSES = 1
LEARNING_RATE = 2
TRAIN_STEPS = 100
BATCH_SIZE = 2000
TEMP_SIZE = 5000
NGRAM = 3
BETA_INIT = 4e-3
MAX_FEAT *= NGRAM
train_data = pd.read_csv("Clean_train.csv", names=['id', 'reviews', 'sentiment'], quoting=3)
validation_data = pd.read_csv("Clean_validation.csv", names=['id', 'reviews', 'sentiment'], quoting=3)
TRAIN_SIZE = len(train_data['sentiment'])
validation_SIZE = len(validation_data['sentiment'])
#print(validation_data)
vectorizer = CountVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = None, \
ngram_range=(1, NGRAM), \
max_features = MAX_FEAT)
# vectorizer = TfidfVectorizer(analyzer = "word", \
# tokenizer = None, \
# preprocessor = None, \
# stop_words = None, \
# ngram_range=(1, NGRAM), \
# max_features = MAX_FEAT)
#transformer = TfidfTransformer(smooth_idf=False)
x_train_raw = vectorizer.fit_transform(train_data['reviews'])
#x_train_raw = transformer.fit_transform(x_train_raw)
x_train = x_train_raw.toarray()
# train_data['asentiment'] = 1 - train_data['sentiment']
y_train = np.reshape(train_data['sentiment'].values, (TRAIN_SIZE, NUM_CLASSES))
# y_train = np.reshape(train_data[['sentiment', 'asentiment']].values, (TRAIN_SIZE, NUM_CLASSES))
vocab = vectorizer.get_feature_names()
x_validation_raw = vectorizer.transform(validation_data['reviews'])
#x_validation_raw = transformer.fit_transform(x_validation_raw)
x_validation = x_validation_raw.toarray()
# validation_data['asentiment'] = 1 - validation_data['sentiment']
y_validation = np.reshape(validation_data['sentiment'].values, (validation_SIZE, NUM_CLASSES))
# y_validation = np.reshape(validation_data[['sentiment', 'asentiment']].values, (validation_SIZE, NUM_CLASSES))
print(x_train.shape, y_train.shape)
print(x_validation.shape, y_validation.shape)
sess = None
def ResetSession():
tf.reset_default_graph()
global sess
if sess is not None: sess.close()
sess = tf.InteractiveSession()
ResetSession()
x = tf.placeholder(tf.float32, [None, MAX_FEAT], name='x')
y = tf.placeholder(tf.float32, [None, NUM_CLASSES], name='y_label')
beta = tf.placeholder(tf.float32, name='beta')
def weight_variable(inputs, outputs, name):
# Random small values
initial = tf.truncated_normal(shape=[inputs, outputs], stddev=1.0 / math.sqrt(float(inputs)))
return tf.Variable(initial, name=name)
def bias_variable(shape, name):
initial = tf.constant(0.0, shape=[shape])
return tf.Variable(initial, name=name)
def batch_iter(data, batch_size, num_epochs, shuffle=True):
# Generates a batch iterator for a dataset.
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data)-1)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
W = weight_variable(MAX_FEAT, NUM_CLASSES, 'weights')
b = bias_variable(NUM_CLASSES, name='bias')
h = tf.matmul(x, W) + b
h_sig = tf.sigmoid(h)
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=h, labels=y))
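# Loss: binary (sigmoid) cross-entropy on the sentiment label plus L2 weight
# decay; beta is a placeholder fed at run time so its strength can be swept.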
regularizer = tf.nn.l2_loss(W)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=h, labels=y) + beta * regularizer)
# train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)
train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)
h_class = tf.cast((h_sig > 0.5), tf.float32)
prediction = tf.equal((h_sig > 0.5), tf.cast(y, tf.bool))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
init_g = tf.global_variables_initializer()
#i_lst = []
j_beta = BETA_INIT
mult = 2
j_tr_loss = []
j_vl_loss = []
j_beta_lst = []
for j in range(1):
# tr_loss_lst = []
# ts_loss_lst = []
sess.run(init_g)
i = 0
avg_loss = 0
    batches = batch_iter(list(zip(x_train, y_train)), BATCH_SIZE, TRAIN_STEPS)
for batch in batches:
x_batch, y_batch = zip(*batch)
#i_lst.append(i)
i_loss, _ = sess.run([loss, train_step], feed_dict={x: x_batch, y: y_batch, beta: j_beta})
# avg_loss += i_loss
# if i % 10 == 0:
# print(int((i * 10) / TRAIN_STEPS), i_loss)
# tr_loss_lst.append(avg_loss/10)
# avg_loss = 0
# ts_loss_lst.append(sess.run(accuracy, feed_dict={x: x_validation, y: y_validation, beta: 0}))
if i % 100 == 0:
print("Train accuracy %f" % sess.run(accuracy, feed_dict={x: x_train, y: y_train}))
# sess.run(init_l)
exh, exy = sess.run([h_sig, y], feed_dict={x: x_batch, y: y_batch})
print("Train AUC %f" % roc_auc_score(exy[:,0],exh[:,0]))
i += 1
print("%d, beta %f" % (j, j_beta))
j_beta_lst.append(j_beta)
print("Train accuracy %f" % sess.run(accuracy, feed_dict={x: x_train, y: y_train}))
exh, exy, j_loss = sess.run([h_sig, y, loss], feed_dict={x: x_train, y: y_train, beta: 0})
j_tr_loss.append(j_loss)
# print(exh)
print("Train AUC %f" % roc_auc_score(exy[:,0],exh[:,0]))
print("Validation accuracy %f" % sess.run(accuracy, feed_dict={x: x_validation, y: y_validation}))
exh, exy, j_loss = sess.run([h_sig, y, loss], feed_dict={x: x_validation, y: y_validation, beta: 0})
j_vl_loss.append(j_loss)
print("Validation AUC %f" % roc_auc_score(exy[:,0],exh[:,0]))
j_beta *= mult
# Plot loss and validation accuracy
# plt.plot(tr_loss_lst, 'b')
# plt.plot(ts_loss_lst, 'g')
# Plot train and validation loss vs regularization parameter
# plt.plot(j_beta_lst, j_tr_loss, 'b')
# plt.plot(j_beta_lst, j_vl_loss, 'g')
# plt.show()
# Copy the results to a pandas dataframe with an "id" column and
# a "sentiment" column
test_data = pd.read_csv("Clean_test.csv", names=['id', 'reviews'], quoting=3)
# print(test_data['id'].shape)
x_test = (vectorizer.transform(test_data['reviews'])).toarray()
# print(x_test)
result, _ = sess.run([h_sig, h], feed_dict={x: x_test})
print(result)
output = pd.DataFrame( data={"id":test_data["id"], "sentiment":result[:,0]} )
# Use pandas to write the comma-separated output file
output.to_csv( "result.csv", index=False, quoting=3 ) | mit |
lorenzo-desantis/mne-python | mne/dipole.py | 3 | 26432 | # Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import numpy as np
from scipy import linalg
from copy import deepcopy
import re
from .cov import read_cov, _get_whitener_data
from .io.pick import pick_types, channel_type
from .io.proj import make_projector, _has_eeg_average_ref_proj
from .bem import _fit_sphere
from .transforms import (_print_coord_trans, _coord_frame_name,
apply_trans, invert_transform, Transform)
from .forward._make_forward import (_get_mri_head_t, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .externals.six import string_types
from .surface import (transform_surface_to, _normalize_vectors,
_get_ico_surface, _compute_nearest)
from .bem import _bem_find_surface, _bem_explain_surface
from .source_space import (_make_volume_source_space, SourceSpaces,
_points_outside_surface)
from .parallel import parallel_func
from .fixes import partial
from .utils import logger, verbose, _time_mask
class Dipole(object):
"""Dipole class
Used to store positions, orientations, amplitudes, times, goodness of fit
of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
or certain inverse solvers.
Parameters
----------
times : array, shape (n_dipoles,)
The time instants at which each dipole was fitted (sec).
pos : array, shape (n_dipoles, 3)
The dipoles positions (m).
amplitude : array, shape (n_dipoles,)
The amplitude of the dipoles (nAm).
ori : array, shape (n_dipoles, 3)
The dipole orientations (normalized to unit length).
gof : array, shape (n_dipoles,)
The goodness of fit.
name : str | None
Name of the dipole.
"""
def __init__(self, times, pos, amplitude, ori, gof, name=None):
self.times = times
self.pos = pos
self.amplitude = amplitude
self.ori = ori
self.gof = gof
self.name = name
def __repr__(self):
s = "n_times : %s" % len(self.times)
s += ", tmin : %s" % np.min(self.times)
s += ", tmax : %s" % np.max(self.times)
return "<Dipole | %s>" % s
def save(self, fname):
"""Save dipole in a .dip file
Parameters
----------
fname : str
The name of the .dip file.
"""
fmt = " %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.1f"
with open(fname, 'wb') as fid:
fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
fid.write('# begin end X (mm) Y (mm) Z (mm)'
' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%\n'
.encode('utf-8'))
t = self.times[:, np.newaxis] * 1000.
gof = self.gof[:, np.newaxis]
amp = 1e9 * self.amplitude[:, np.newaxis]
out = np.concatenate((t, t, self.pos / 1e-3, amp,
self.ori * amp, gof), axis=-1)
np.savetxt(fid, out, fmt=fmt)
if self.name is not None:
fid.write(('## Name "%s dipoles" Style "Dipoles"'
% self.name).encode('utf-8'))
def crop(self, tmin=None, tmax=None):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
"""
mask = _time_mask(self.times, tmin, tmax)
for attr in ('times', 'pos', 'gof', 'amplitude', 'ori'):
setattr(self, attr, getattr(self, attr)[mask])
def copy(self):
"""Copy the Dipoles object
Returns
-------
dip : instance of Dipole
The copied dipole instance.
"""
return deepcopy(self)
@verbose
def plot_locations(self, trans, subject, subjects_dir=None,
bgcolor=(1, 1, 1), opacity=0.3,
brain_color=(0.7, 0.7, 0.7), mesh_color=(1, 1, 0),
fig_name=None, fig_size=(600, 600), mode='cone',
scale_factor=0.1e-1, colors=None, verbose=None):
"""Plot dipole locations as arrows
Parameters
----------
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : None | str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
The default is None.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
mesh_color : tuple of length 3
Mesh color.
fig_name : tuple of length 2
Mayavi figure name.
fig_size : tuple of length 2
Mayavi figure size.
mode : str
Should be ``'cone'`` or ``'sphere'`` to specify how the
dipoles should be shown.
scale_factor : float
The scaling applied to amplitudes for the plot.
colors: list of colors | None
Color to plot with each dipole. If None defaults colors are used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
from .viz import plot_dipole_locations
dipoles = []
for t in self.times:
dipoles.append(self.copy())
dipoles[-1].crop(t, t)
return plot_dipole_locations(
dipoles, trans, subject, subjects_dir, bgcolor, opacity,
brain_color, mesh_color, fig_name, fig_size, mode, scale_factor,
colors)
def plot_amplitudes(self, color='k', show=True):
"""Plot the dipole amplitudes as a function of time
Parameters
----------
color: matplotlib Color
Color to use for the trace.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
"""
from .viz import plot_dipole_amplitudes
return plot_dipole_amplitudes([self], [color], show)
def __getitem__(self, idx_slice):
"""Handle indexing"""
if isinstance(idx_slice, int): # make sure attributes stay 2d
idx_slice = [idx_slice]
selected_times = self.times[idx_slice].copy()
selected_pos = self.pos[idx_slice, :].copy()
selected_amplitude = self.amplitude[idx_slice].copy()
selected_ori = self.ori[idx_slice, :].copy()
selected_gof = self.gof[idx_slice].copy()
selected_name = self.name
new_dipole = Dipole(selected_times, selected_pos,
selected_amplitude, selected_ori,
selected_gof, selected_name)
return new_dipole
def __len__(self):
"""Handle len function"""
return self.pos.shape[0]
# #############################################################################
# IO
@verbose
def read_dipole(fname, verbose=None):
"""Read .dip file from Neuromag/xfit or MNE
Parameters
----------
fname : str
The name of the .dip file.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
dipole : instance of Dipole
The dipole.
"""
try:
data = np.loadtxt(fname, comments='%')
except:
data = np.loadtxt(fname, comments='#') # handle 2 types of comments...
name = None
with open(fname, 'r') as fid:
for line in fid.readlines():
if line.startswith('##') or line.startswith('%%'):
m = re.search('Name "(.*) dipoles"', line)
if m:
name = m.group(1)
break
if data.ndim == 1:
data = data[None, :]
logger.info("%d dipole(s) found" % len(data))
times = data[:, 0] / 1000.
pos = 1e-3 * data[:, 2:5] # put data in meters
amplitude = data[:, 5]
norm = amplitude.copy()
amplitude /= 1e9
norm[norm == 0] = 1
ori = data[:, 6:9] / norm[:, np.newaxis]
gof = data[:, 9]
return Dipole(times, pos, amplitude, ori, gof, name)
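# Illustrative usage (the file names are placeholders, not shipped with this module):
#
#     dip = read_dipole('sample_set1.dip')
#     dip.crop(tmin=0.07, tmax=0.09)   # keep fits between 70 and 90 ms
#     print(dip)                       # <Dipole | n_times : ...>
#     dip.save('sample_subset.dip')
#
# Both Neuromag/xfit and MNE flavoured .dip files are accepted, because the
# loader retries np.loadtxt with the alternative comment character.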
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
"""Compute the forward solution and do other nice stuff"""
B = _compute_forwards_meeg(rr, fwd_data, n_jobs, verbose=False)
B = np.concatenate(B, axis=1)
B_orig = B.copy()
# Apply projection and whiten (cov has projections already)
B = np.dot(B, whitener.T)
# column normalization doesn't affect our fitting, so skip for now
# S = np.sum(B * B, axis=1) # across channels
# scales = np.repeat(3. / np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),
# axis=1)), 3)
# B *= scales[:, np.newaxis]
scales = np.ones(3)
return B, B_orig, scales
def _make_guesses(surf_or_rad, r0, grid, exclude, mindist, n_jobs):
"""Make a guess space inside a sphere or BEM surface"""
if isinstance(surf_or_rad, dict):
surf = surf_or_rad
logger.info('Guess surface (%s) is in %s coordinates'
% (_bem_explain_surface(surf['id']),
_coord_frame_name(surf['coord_frame'])))
else:
radius = surf_or_rad[0]
logger.info('Making a spherical guess space with radius %7.1f mm...'
% (1000 * radius))
surf = _get_ico_surface(3)
_normalize_vectors(surf['rr'])
surf['rr'] *= radius
surf['rr'] += r0
logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
do_neighbors=False, n_jobs=n_jobs)
# simplify the result to make things easier later
src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
nuse=src['nuse'], coord_frame=src['coord_frame'],
vertno=np.arange(src['nuse']))
return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None):
"""Calculate the residual sum of squares"""
if fwd_svd is None:
fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
uu, sing, vv = linalg.svd(fwd, overwrite_a=True, full_matrices=False)
else:
uu, sing, vv = fwd_svd
gof = _dipole_gof(uu, sing, vv, B, B2)[0]
# mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
return 1. - gof
def _dipole_gof(uu, sing, vv, B, B2):
"""Calculate the goodness of fit from the forward SVD"""
ncomp = 3 if sing[2] / sing[0] > 0.2 else 2
one = np.dot(vv[:ncomp], B)
Bm2 = np.sum(one * one)
gof = Bm2 / B2
return gof, one
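# Concretely, with fwd = uu * diag(sing) * vv (shape 3 x n_channels), the whitened
# data vector B is projected on the ncomp strongest right-singular directions, so
#
#     gof = ||vv[:ncomp] . B||**2 / ||B||**2
#
# is the fraction of (whitened, projected) signal power explained by the dipole
# forward model; the weakest orientation is dropped when sing[2]/sing[0] <= 0.2.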
def _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig, rd):
"""Fit the dipole moment once the location is known"""
fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
rd[np.newaxis, :])
uu, sing, vv = linalg.svd(fwd, full_matrices=False)
gof, one = _dipole_gof(uu, sing, vv, B, B2)
ncomp = len(one)
# Counteract the effect of column normalization
Q = scales[0] * np.sum(uu.T[:ncomp] * (one / sing[:ncomp])[:, np.newaxis],
axis=0)
# apply the projector to both elements
B_residual = np.dot(proj_op, B_orig) - np.dot(np.dot(Q, fwd_orig),
proj_op.T)
return Q, gof, B_residual
def _fit_dipoles(min_dist_to_inner_skull, data, times, guess_rrs,
guess_fwd_svd, fwd_data, whitener, proj_op, n_jobs):
"""Fit a single dipole to the given whitened, projected data"""
from scipy.optimize import fmin_cobyla
parallel, p_fun, _ = parallel_func(_fit_dipole, n_jobs)
# parallel over time points
res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
guess_fwd_svd, fwd_data, whitener, proj_op,
fmin_cobyla)
for B, t in zip(data.T, times))
pos = np.array([r[0] for r in res])
amp = np.array([r[1] for r in res])
ori = np.array([r[2] for r in res])
gof = np.array([r[3] for r in res]) * 100 # convert to percentage
residual = np.array([r[4] for r in res]).T
return pos, amp, ori, gof, residual
'''Simplex code in case we ever want/need it for testing
def _make_tetra_simplex():
"""Make the initial tetrahedron"""
#
# For this definition of a regular tetrahedron, see
#
# http://mathworld.wolfram.com/Tetrahedron.html
#
x = np.sqrt(3.0) / 3.0
r = np.sqrt(6.0) / 12.0
R = 3 * r
d = x / 2.0
simplex = 1e-2 * np.array([[x, 0.0, -r],
[-d, 0.5, -r],
[-d, -0.5, -r],
[0., 0., R]])
return simplex
def try_(p, y, psum, ndim, fun, ihi, neval, fac):
"""Helper to try a value"""
ptry = np.empty(ndim)
fac1 = (1.0 - fac) / ndim
fac2 = fac1 - fac
ptry = psum * fac1 - p[ihi] * fac2
ytry = fun(ptry)
neval += 1
if ytry < y[ihi]:
y[ihi] = ytry
psum[:] += ptry - p[ihi]
p[ihi] = ptry
return ytry, neval
def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
"""Minimization with the simplex algorithm
Modified from Numerical recipes"""
y = np.array([fun(s) for s in p])
ndim = p.shape[1]
assert p.shape[0] == ndim + 1
mpts = ndim + 1
neval = 0
psum = p.sum(axis=0)
loop = 1
while(True):
ilo = 1
if y[1] > y[2]:
ihi = 1
inhi = 2
else:
ihi = 2
inhi = 1
for i in range(mpts):
if y[i] < y[ilo]:
ilo = i
if y[i] > y[ihi]:
inhi = ihi
ihi = i
elif y[i] > y[inhi]:
if i != ihi:
inhi = i
rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
if rtol < ftol:
break
if neval >= max_eval:
raise RuntimeError('Maximum number of evaluations exceeded.')
if stol > 0: # Has the simplex collapsed?
dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
if loop > 5 and dsum < stol:
break
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
if ytry <= y[ilo]:
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
elif ytry >= y[inhi]:
ysave = y[ihi]
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
if ytry >= ysave:
for i in range(mpts):
if i != ilo:
psum[:] = 0.5 * (p[i] + p[ilo])
p[i] = psum
y[i] = fun(psum)
neval += ndim
psum = p.sum(axis=0)
loop += 1
'''
def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_fwd_svd, fwd_data, whitener, proj_op,
fmin_cobyla):
"""Fit a single bit of data"""
B = np.dot(whitener, B_orig)
# make constraint function to keep the solver within the inner skull
if isinstance(fwd_data['inner_skull'], dict): # bem
surf = fwd_data['inner_skull']
def constraint(rd):
dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
return_dists=True)[1][0]
if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
dist *= -1.
# Once we know the dipole is below the inner skull,
# let's check if its distance to the inner skull is at least
# min_dist_to_inner_skull. This can be enforced by adding a
# constrain proportional to its distance.
dist -= min_dist_to_inner_skull
return dist
else: # sphere
surf = None
R, r0 = fwd_data['inner_skull']
R_adj = R - min_dist_to_inner_skull
def constraint(rd):
return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
# Find a good starting point (find_best_guess in C)
B2 = np.dot(B, B)
if B2 == 0:
logger.warning('Zero field found for time %s' % t)
        return np.zeros(3), 0., np.zeros(3), 0., np.zeros_like(B_orig)  # keep five return values so _fit_dipoles can unpack the residual
idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
for fi, fwd_svd in enumerate(guess_fwd_svd)])
x0 = guess_rrs[idx]
fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener)
# Tested minimizers:
# Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
# Several were similar, but COBYLA won for having a handy constraint
# function we can use to ensure we stay inside the inner skull /
# smallest sphere
rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
rhobeg=5e-2, rhoend=5e-5, disp=False)
# simplex = _make_tetra_simplex() + x0
# _simplex_minimize(simplex, 1e-4, 2e-4, fun)
# rd_final = simplex[0]
# Compute the dipole moment at the final point
Q, gof, residual = _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig,
rd_final)
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
msg = '---- Fitted : %7.1f ms' % (1000. * t)
if surf is not None:
dist_to_inner_skull = _compute_nearest(surf['rr'],
rd_final[np.newaxis, :],
return_dists=True)[1][0]
msg += (", distance to inner skull : %2.4f mm"
% (dist_to_inner_skull * 1000.))
logger.info(msg)
return rd_final, amp, ori, gof, residual
@verbose
def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
verbose=None):
"""Fit a dipole
Parameters
----------
evoked : instance of Evoked
The dataset to fit.
cov : str | instance of Covariance
The noise covariance.
bem : str | dict
The BEM filename (str) or a loaded sphere model (dict).
trans : str | None
The head<->MRI transform filename. Must be provided unless BEM
is a sphere model.
min_dist : float
        Minimum distance (in millimeters) from the dipole to the inner skull.
Must be positive. Note that because this is a constraint passed to
a solver it is not strict but close, i.e. for a ``min_dist=5.`` the
fits could be 4.9 mm from the inner skull.
n_jobs : int
Number of jobs to run in parallel (used in field computation
and fitting).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
dip : instance of Dipole
The dipole fits.
residual : ndarray, shape (n_meeg_channels, n_times)
The good M-EEG data channels with the fitted dipolar activity
removed.
See Also
--------
mne.beamformer.rap_music
Notes
-----
.. versionadded:: 0.9.0
"""
# This could eventually be adapted to work with other inputs, these
# are what is needed:
evoked = evoked.copy()
# Determine if a list of projectors has an average EEG ref
if "eeg" in evoked and not _has_eeg_average_ref_proj(evoked.info['projs']):
raise ValueError('EEG average reference is mandatory for dipole '
'fitting.')
if min_dist < 0:
raise ValueError('min_dist should be positive. Got %s' % min_dist)
data = evoked.data
info = evoked.info
times = evoked.times.copy()
comment = evoked.comment
# Convert the min_dist to meters
min_dist_to_inner_skull = min_dist / 1000.
del min_dist
# Figure out our inputs
neeg = len(pick_types(info, meg=False, eeg=True, exclude=[]))
if isinstance(bem, string_types):
logger.info('BEM : %s' % bem)
if trans is not None:
logger.info('MRI transform : %s' % trans)
mri_head_t, trans = _get_mri_head_t(trans)
else:
mri_head_t = Transform('head', 'mri', np.eye(4))
bem = _setup_bem(bem, bem, neeg, mri_head_t)
if not bem['is_sphere']:
if trans is None:
raise ValueError('mri must not be None if BEM is provided')
# Find the best-fitting sphere
inner_skull = _bem_find_surface(bem, 'inner_skull')
inner_skull = inner_skull.copy()
R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
logger.info('Grid origin : '
'%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
else:
r0 = bem['r0']
logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2]))
if 'layers' in bem:
R = bem['layers'][0]['rad']
else:
R = np.inf
inner_skull = [R, r0]
r0_mri = apply_trans(invert_transform(mri_head_t)['trans'],
r0[np.newaxis, :])[0]
# Eventually these could be parameters, but they are just used for
# the initial grid anyway
guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf
guess_mindist = max(0.005, min_dist_to_inner_skull)
guess_exclude = 0.02
accurate = False # can be made an option later (shouldn't make big diff)
logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))
if guess_mindist > 0.0:
logger.info('Guess mindist : %6.1f mm' % (1000 * guess_mindist,))
if guess_exclude > 0:
logger.info('Guess exclude : %6.1f mm' % (1000 * guess_exclude,))
logger.info('Using %s MEG coil definitions.'
% ("accurate" if accurate else "standard"))
if isinstance(cov, string_types):
logger.info('Noise covariance : %s' % (cov,))
cov = read_cov(cov, verbose=False)
logger.info('')
_print_coord_trans(mri_head_t)
_print_coord_trans(info['dev_head_t'])
logger.info('%d bad channels total' % len(info['bads']))
# Forward model setup (setup_forward_model from setup.c)
ch_types = [channel_type(info, idx) for idx in range(info['nchan'])]
megcoils, compcoils, megnames, meg_info = [], [], [], None
eegels, eegnames = [], []
if 'grad' in ch_types or 'mag' in ch_types:
megcoils, compcoils, megnames, meg_info = \
_prep_meg_channels(info, exclude='bads',
accurate=accurate, verbose=verbose)
if 'eeg' in ch_types:
eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
verbose=verbose)
# Ensure that MEG and/or EEG channels are present
if len(megcoils + eegels) == 0:
raise RuntimeError('No MEG or EEG channels found.')
# Whitener for the data
logger.info('Decomposing the sensor noise covariance matrix...')
picks = pick_types(info, meg=True, eeg=True)
# In case we want to more closely match MNE-C for debugging:
# from .io.pick import pick_info
# from .cov import prepare_noise_cov
# info_nb = pick_info(info, picks)
# cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
# nzero = (cov['eig'] > 0)
# n_chan = len(info_nb['ch_names'])
# whitener = np.zeros((n_chan, n_chan), dtype=np.float)
# whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
# whitener = np.dot(whitener, cov['eigvec'])
whitener = _get_whitener_data(info, cov, picks, verbose=False)
# Proceed to computing the fits (make_guess_data)
logger.info('\n---- Computing the forward solution for the guesses...')
guess_src = _make_guesses(inner_skull, r0_mri,
guess_grid, guess_exclude, guess_mindist,
n_jobs=n_jobs)[0]
if isinstance(inner_skull, dict):
transform_surface_to(inner_skull, 'head', mri_head_t)
transform_surface_to(guess_src, 'head', mri_head_t)
# C code computes guesses using a sphere model for speed, don't bother here
logger.info('Go through all guess source locations...')
fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
inner_skull=inner_skull)
_prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
verbose=False)
guess_fwd = _dipole_forwards(fwd_data, whitener, guess_src['rr'],
n_jobs=n_jobs)[0]
# decompose ahead of time
guess_fwd_svd = [linalg.svd(fwd, overwrite_a=True, full_matrices=False)
for fwd in np.array_split(guess_fwd,
len(guess_src['rr']))]
del guess_fwd # destroyed
logger.info('[done %d sources]' % guess_src['nuse'])
# Do actual fits
data = data[picks]
ch_names = [info['ch_names'][p] for p in picks]
proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
out = _fit_dipoles(min_dist_to_inner_skull, data, times, guess_src['rr'],
guess_fwd_svd, fwd_data,
whitener, proj_op, n_jobs)
dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment)
residual = out[4]
logger.info('%d dipoles fitted' % len(dipoles.times))
return dipoles, residual
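# Illustrative call (file names are placeholders; see the docstring above for the
# exact requirements on evoked, cov, bem and trans):
#
#     evoked = mne.read_evokeds('sample-ave.fif', condition=0)
#     evoked.crop(0.07, 0.08)
#     dip, residual = fit_dipole(evoked, 'sample-cov.fif', 'sample-bem-sol.fif',
#                                trans='sample-trans.fif', min_dist=5.)
#     dip.plot_amplitudes()
#
# residual has shape (n_good_meeg_channels, n_times).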
| bsd-3-clause |
benjaminpope/whisky | kergain_sim_phase.py | 2 | 12163 | import numpy as np
import matplotlib.pyplot as plt
import pysco
from pysco.core import *
import fitsio
from k2_epd_george import print_time
from time import time as clock
from old_diffract_tools import *
import pymultinest
from pysco.diffract_tools import shift_image_ft
from pysco.common_tasks import shift_image
from swiftmask import swiftpupil
import matplotlib as mpl
from astropy.table import Table
mpl.style.use('seaborn-colorblind')
mpl.rcParams['figure.figsize']=(8.0,6.0) #(6.0,4.0)
mpl.rcParams['font.size']= 16 #10
mpl.rcParams['savefig.dpi']=200 #72
mpl.rcParams['axes.labelsize'] = 14
mpl.rcParams['xtick.labelsize'] = 10
mpl.rcParams['ytick.labelsize'] = 10
shift = np.fft.fftshift
fft = np.fft.fft2
ifft = np.fft.ifft2
fftfreq = np.fft.fftfreq
fftn = np.fft.fftn
rfftn = np.fft.rfftn
dtor = np.pi/180.0
'''------------------------------------------------------------
kergain_sim.py
Automate a simulation of the effectiveness of raw visibility
fitting versus kernel amplitudes
------------------------------------------------------------'''
pupil = 'plain'
try:
a = pysco.kpi('./geometry/'+pupil+'model.pick')
print 'Loaded kernel phase object'
except:
a = pysco.kpi('./geometry/'+pupil+'.txt')
a.name = 'Test'
a.save_to_file('./geometry/'+pupil+'model.pick')
nbuv, nbh = a.nbuv, a.nbh
try:
KerGain = np.loadtxt('KerGain_plain.csv')
print 'Loaded kernel amplitude matrix'
except:
gtfm = np.abs(a.TFM)
U, S, Vh = np.linalg.svd(gtfm.T, full_matrices=1)
S1 = np.zeros(nbuv)
S1[0:nbh-1] = S
nkg = np.size(np.where(abs(S1) < 1e-3))
print nkg
KGCol = np.where(abs(S1) < 1e-3)[0]
KerGain = np.zeros((nkg, nbuv)) # allocate the array
for i in range(nkg):
KerGain[i,:] = (Vh)[KGCol[i],:]
np.savetxt('KerGain_plain.csv',KerGain)
print 'saved'
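# Optional sanity check (sketch): each row of KerGain lies in the left null space
# of the amplitude transfer matrix, which is what makes the projected quantities
# "kernel" amplitudes. With the 1e-3 singular-value cut used above this can be
# verified by, e.g.,
#
#   assert np.allclose(np.dot(KerGain, np.abs(a.TFM)), 0., atol=1e-3)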
###-----------------------------------------
### now initialize a simulation
###-----------------------------------------
'''------------------------------
First, set all your parameters.
------------------------------'''
print '\nSimulating a basic PSF'
wavel = 2.5e-6
rprim = 5.093/2.#36903.e-3/2.
rsec= 1.829/2.
pos = [0,0] #m, deg
spaxel = 36.
piston = 0
nimages = 200
reso = rad2mas(wavel/(2*rprim))
print 'Minimum Lambda/D = %.3g mas' % reso
image, imagex = diffract(wavel,rprim,rsec,pos,piston=piston,spaxel=spaxel,seeing=None,verbose=False,\
show_pupil=False,mode=None)
# image = recenter(image,sg_rad=25)
imsz = image.shape[0]
images = np.zeros((nimages,imsz,imsz))
psfs = np.zeros((nimages,imsz,imsz))
k=0
show=False
'''----------------------------------------
Loop over a range of contrasts
----------------------------------------'''
# contrast_list = [10,50,100,150,200,250,300,350,400,450,500]
contrast_list = [10,50,75,100,125,150,175,200,250,300,350,400,450,500,600,700,800,900,1000,1100,1200,1300,1400,1500]
# contrast_list = [10,50,100,200,300,400,500
contrast_list = np.linspace(10,2000,19)
ncalcs = len(contrast_list)
kseps, kthetas, kcons = np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
dkseps, dkthetas, dkcons = np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
vseps, vthetas, vcons = np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
dvseps, dvthetas, dvcons = np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
t0 = clock()
sep, theta = 48, 45
xb,yb = np.cos(theta*np.pi/180)*sep/spaxel, np.sin(theta*np.pi/180)*sep/spaxel
print 'x',xb,',y',yb
seeingamp = 0.75
try:
dummy = fitsio.FITS('psf_cube_phase_%.2f_wavel_%.2f.fits' % (seeingamp,wavel*1e6))
psfs = dummy[0][:,:,:]
print 'Loaded PSFs'
except:
print 'Creating PSFs'
for j in range(nimages):
psfs[j,:,:], imagex = diffract(wavel,rprim,rsec,pos,piston=piston,spaxel=spaxel,verbose=False,\
show_pupil=show,mode='phase',
perturbation=None,amp=0.0,seeingamp=seeingamp)
fitsio.write('psf_cube_phase_%.2f_wavel_%.2f.fits' % (seeingamp,wavel*1e6),psfs)
print_time(clock()-t0)
rev = 1
ac = shift(fft(shift(image)))
ac /= (np.abs(ac)).max() / a.nbh
'''----------------------------------------
Initialise pysco with a pupil model
----------------------------------------'''
# meter to pixel conversion factor
scale = 1.0
m2pix = mas2rad(spaxel) * imsz/ wavel * scale
uv_samp = a.uv * m2pix + imsz/2 # uv sample coordinates in pixels
x = a.mask[:,0]
y = a.mask[:,1]
uv_samp_rev=np.cast['int'](np.round(uv_samp))
uv_samp_rev[:,0]*=rev
data_cplx=ac[uv_samp_rev[:,1], uv_samp_rev[:,0]]
vis2 = np.abs(data_cplx)
vis2 /= vis2.max() #normalise to the origin
mvis = a.RED/a.RED.max().astype('float')
'''----------------------------------------
Loop over contrasts
----------------------------------------'''
for trial, contrast in enumerate(contrast_list):
print '\nSimulating for contrast %f' % contrast
thistime = clock()
for j in range(nimages):
images[j,:,:] = psfs[j,:,:] + shift_image_ft(psfs[j,:,:],[-yb,-xb])/contrast#shift_image(psf,x=x,y=y,doRoll=True)/contrast
imsz = image.shape[0]
show=False
k+=1
'''----------------------------------------
Extract Visibilities
----------------------------------------'''
# kpd_phase = np.angle(data_cplx)/dtor
# kpd_signal = np.dot(a.KerPhi, kpd_phase)
kervises=np.zeros((nimages/2,KerGain.shape[0]))
vis2s = np.zeros((nimages/2,vis2.shape[0]))
vis2_cals = np.zeros((nimages/2,vis2.shape[0]))
kpd_signals = np.zeros((nimages/2,a.KerPhi.shape[0]))
# phases = np.zeros((nimages,vis2.shape[0]))
randomGain = np.random.randn(np.shape(KerGain)[0],np.shape(KerGain)[1])
for j in range(nimages/2):
image3 = psfs[j,:,:]
ac3 = shift(fft(shift(image3)))
ac3 /= (np.abs(ac3)).max() / a.nbh
data_cplx3=ac3[uv_samp_rev[:,1], uv_samp_rev[:,0]]
vis2c = np.abs(data_cplx3)
vis2c /= vis2c.max() #normalise to the origin
# vi2sc[vis2c>1] = 1
vis2_cals[j,:]=vis2c
vis2cal = np.mean(vis2_cals,axis=0)
for j in range(nimages/2):
image2 = images[j+nimages/2,:,:]
ac2 = shift(fft(shift(image2)))
ac2 /= (np.abs(ac2)).max() / a.nbh
data_cplx2=ac2[uv_samp_rev[:,1], uv_samp_rev[:,0]]
vis2b = np.abs(data_cplx2)
vis2b /= vis2b.max() #normalise to the origin
# vis2b[vis2b>1.] = 1.
vis2s[j,:]= vis2b
# log_data_complex_b = np.log(np.abs(data_cplx2))+1.j*np.angle(data_cplx2)
# phases[j,:] = np.angle(data_cplx2)/dtor
kervises[j,:] = np.dot(KerGain,vis2b/vis2cal-1.)
# kervises[j,:] = np.dot(KerGain,np.sqrt(vis2b/vis2cal)**2-1)
# kervises[j,:] = np.dot(randomGain, np.sqrt(vis2b)-mvis)
# kpd_signals[j,:] = np.dot(a.KerPhi,np.angle(data_cplx2))/dtor
# kercomplexb = np.dot(KerBispect,log_data_complex_b)
# kervises_cplx[j,:] = np.abs(kercomplexb)
paramlimits = [20.,80.,30.,60.,contrast/2.,contrast*2.]
hdr = {'tel':'HST',
'filter':wavel,
'orient':0}
def myprior(cube, ndim, n_params,paramlimits=paramlimits):
cube[0] = (paramlimits[1] - paramlimits[0])*cube[0]+paramlimits[0]
cube[1] = (paramlimits[3] - paramlimits[2])*cube[1]+paramlimits[2]
for k in range(2,ndim):
cube[k] = (paramlimits[5] - paramlimits[4])*cube[k]+paramlimits[4]
def vis_loglikelihood(cube,vdata,ve,kpi):
'''Calculate chi2 for single band vis2 data.
Used both in the MultiNest and MCMC Hammer implementations.'''
vises = pysco.binary_model(cube[0:3],kpi,hdr,vis2=True)
chi2 = np.sum(((vdata-vises)/ve)**2)
return -chi2/2.
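    # Note: returning -chi2/2 is the Gaussian log-likelihood (up to an additive
    # constant) for independent errors ve:
    #     ln L = -0.5 * sum_i ((d_i - m_i) / sigma_i)**2 + const
    # kg_loglikelihood below uses the same convention, which is what MultiNest
    # expects when integrating the evidence.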
'''-----------------------------------------------
First do kernel amplitudes
-----------------------------------------------'''
my_observable = np.mean(kervises,axis=0)
# raw_data = np.dot(KerGain,np.sqrt((vis2s/vis2cal)**2).T-1.).T
# mycov = np.cov(raw_data.T) # calculate statistically independent KA
# my_eigs, my_s_matrix = np.linalg.eigh(mycov) # hermitian
# thismatrix = np.dot(my_s_matrix,KerGain)
thismatrix = KerGain
my_observable = np.mean(np.dot(thismatrix,(vis2s/vis2cal).T-1.),axis=1)
def kg_loglikelihood(cube,kgd,kge,kpi):
'''Calculate chi2 for single band kernel amplitude data.
Used both in the MultiNest and MCMC Hammer implementations.'''
vises = np.sqrt(pysco.binary_model(cube[0:3],kpi,hdr,vis2=True))
kergains = np.dot(thismatrix,vises-1)
chi2 = np.sum(((kgd-kergains)/kge)**2)
return -chi2/2.
# addederror = np.std(my_observable) # in case there are bad frames
addederror = 1e-5
my_error = np.sqrt(np.std(kervises,axis=0)**2+addederror**2)
# my_error = np.sqrt(my_eigs**2+addederror**2)
print 'Error:', my_error
def myloglike_kg(cube,ndim,n_params):
try:
loglike = kg_loglikelihood(cube,my_observable,my_error,a)
return loglike
except:
return -np.inf
parameters = ['Separation','Position Angle','Contrast']
n_params = len(parameters)
resume=False
eff=0.3
    multi=True  # no trailing comma; it previously made this a one-element tuple
max_iter= 0
ndim = n_params
pymultinest.run(myloglike_kg, myprior, n_params, wrapped_params=[1],
verbose=True,resume=False)
thing = pymultinest.Analyzer(n_params = n_params)
s = thing.get_stats()
this_j = trial
kseps[this_j], dkseps[this_j] = s['marginals'][0]['median'], s['marginals'][0]['sigma']
kthetas[this_j], dkthetas[this_j] = s['marginals'][1]['median'], s['marginals'][1]['sigma']
kcons[this_j], dkcons[this_j] = s['marginals'][2]['median'], s['marginals'][2]['sigma']
stuff = thing.get_best_fit()
best_params = stuff['parameters']
model_vises = np.sqrt(pysco.binary_model(best_params,a,hdr,vis2=True))
model_kervises = np.dot(KerGain,model_vises-1.)
plt.clf()
# plt.errorbar(my_observable,model_kervises,xerr=my_error,
# ls='',markersize=10,linewidth=2.5)
plt.plot(my_observable,model_kervises,'.',
ls='',markersize=10,linewidth=2.5)
plt.xlabel('Measured Kernel Amplitudes')
plt.ylabel('Model Kernel Amplitudes')
plt.title('Model Fit: Kernel Amplitudes, Contrast %.1f' % contrast)
plt.savefig('kpfit_bin_phase_%.1f_con.png' % contrast)
print 'Kernel amplitudes done'
print_time(clock()-thistime)
print ''
'''-----------------------------------------------
Now do visibilities
-----------------------------------------------'''
my_observable = np.mean((vis2s/vis2cal)**2,axis=0)
print '\nDoing raw visibilities'
addederror = 0.000001
my_error = np.sqrt(np.std((vis2s/vis2cal)**2,axis=0)**2+addederror**2)
print 'Error:', my_error
def myloglike_vis(cube,ndim,n_params):
try:
loglike = vis_loglikelihood(cube,my_observable,my_error,a)
return loglike
except:
return -np.inf
thistime = clock()
pymultinest.run(myloglike_vis, myprior, n_params, wrapped_params=[1],
verbose=True,resume=False)
thing = pymultinest.Analyzer(n_params = n_params)
s = thing.get_stats()
this_j = trial
vseps[this_j], dvseps[this_j] = s['marginals'][0]['median'], s['marginals'][0]['sigma']
vthetas[this_j], dvthetas[this_j] = s['marginals'][1]['median'], s['marginals'][1]['sigma']
vcons[this_j], dvcons[this_j] = s['marginals'][2]['median'], s['marginals'][2]['sigma']
stuff = thing.get_best_fit()
best_params = stuff['parameters']
model_vises = pysco.binary_model(best_params,a,hdr,vis2=True)
plt.clf()
# plt.errorbar(my_observable,model_vises,xerr=my_error,
# ls='',markersize=10,linewidth=2.5)
plt.plot(my_observable,model_vises,'.',
ls='',markersize=10,linewidth=2.5)
plt.xlabel('Measured Visibilities')
plt.ylabel('Model Visibilities')
plt.title('Model Fit: Visibilities, Contrast %.1f' % contrast)
plt.savefig('vis2_bin_phase_%.1f_con.png' % contrast)
print 'Visibilities done'
print_time(clock()-thistime)
'''------------------------------------
Now save!
------------------------------------'''
cmin, cmax = np.min(contrast_list), np.max(contrast_list)
vdata = Table({'Seps':vseps,
'Thetas':vthetas,
'Cons':vcons,
'Dseps':dvseps,
'Dthetas':dvthetas,
'Dcons':dvcons})
vdata.write('raw_vis_sims_phase_%.0f_%.0f.csv' % (cmin,cmax))
print 'Visibility fits saved to raw_vis_sims_phase_%.0f_%.0f.csv' % (cmin,cmax)
kdata = Table({'Seps':kseps,
'Thetas':kthetas,
'Cons':kcons,
'Dseps':dkseps,
'Dthetas':dkthetas,
'Dcons':dkcons})
kdata.write('kernel_amplitude_sims_phase_%.0f_%.0f.csv' % (cmin,cmax))
print 'Kernel amplitude fits saved to kernel_amplitude_sims_phase_%.0f_%.0f.csv' \
% (cmin,cmax)
print 'Finished contrast loop'
print_time(clock()-t0) | gpl-3.0 |
PaddlePaddle/models | PaddleCV/adversarial/tutorials/mnist_tutorial_jsma.py | 1 | 5131 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
JSMA tutorial on mnist using advbox tool.
JSMA method supports both targeted attack and non-targeted attack.
"""
import sys
sys.path.append("..")
import matplotlib.pyplot as plt
import paddle.fluid as fluid
import paddle
from advbox.adversary import Adversary
from advbox.attacks.saliency import JSMA
from advbox.models.paddle import PaddleModel
from tutorials.mnist_model import mnist_cnn_model
def main():
"""
Advbox demo which demonstrate how to use advbox.
"""
TOTAL_NUM = 500
IMG_NAME = 'img'
LABEL_NAME = 'label'
img = fluid.layers.data(name=IMG_NAME, shape=[1, 28, 28], dtype='float32')
# gradient should flow
img.stop_gradient = False
label = fluid.layers.data(name=LABEL_NAME, shape=[1], dtype='int64')
logits = mnist_cnn_model(img)
cost = fluid.layers.cross_entropy(input=logits, label=label)
avg_cost = fluid.layers.mean(x=cost)
# use CPU
place = fluid.CPUPlace()
# use GPU
# place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
BATCH_SIZE = 1
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.test(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
fluid.io.load_params(
exe, "./mnist/", main_program=fluid.default_main_program())
# advbox demo
m = PaddleModel(
fluid.default_main_program(),
IMG_NAME,
LABEL_NAME,
logits.name,
avg_cost.name, (-1, 1),
channel_axis=1)
attack = JSMA(m)
attack_config = {
"max_iter": 2000,
"theta": 0.1,
"max_perturbations_per_pixel": 7
}
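    # Rough meaning of these knobs (summary only, not advbox documentation): JSMA
    # repeatedly perturbs the input pixels with the highest Jacobian-based saliency
    # scores by about `theta` per step, stopping once the predicted label changes
    # (or reaches the target label in targeted mode), or when `max_iter` /
    # `max_perturbations_per_pixel` is exhausted (Papernot et al., 2016).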
# use train data to generate adversarial examples
total_count = 0
fooling_count = 0
for data in train_reader():
total_count += 1
adversary = Adversary(data[0][0], data[0][1])
# JSMA non-targeted attack
adversary = attack(adversary, **attack_config)
# JSMA targeted attack
# tlabel = 0
# adversary.set_target(is_targeted_attack=True, target_label=tlabel)
# adversary = attack(adversary, **attack_config)
# JSMA may return None
if adversary is not None and adversary.is_successful():
fooling_count += 1
print(
'attack success, original_label=%d, adversarial_label=%d, count=%d'
% (data[0][1], adversary.adversarial_label, total_count))
# plt.imshow(adversary.target, cmap='Greys_r')
# plt.show()
# np.save('adv_img', adversary.target)
else:
print('attack failed, original_label=%d, count=%d' %
(data[0][1], total_count))
if total_count >= TOTAL_NUM:
print(
"[TRAIN_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f"
% (fooling_count, total_count,
float(fooling_count) / total_count))
break
# use test data to generate adversarial examples
total_count = 0
fooling_count = 0
for data in test_reader():
total_count += 1
adversary = Adversary(data[0][0], data[0][1])
# JSMA non-targeted attack
adversary = attack(adversary, **attack_config)
# JSMA targeted attack
# tlabel = 0
# adversary.set_target(is_targeted_attack=True, target_label=tlabel)
# adversary = attack(adversary, **attack_config)
# JSMA may return None
if adversary is not None and adversary.is_successful():
fooling_count += 1
print(
'attack success, original_label=%d, adversarial_label=%d, count=%d'
% (data[0][1], adversary.adversarial_label, total_count))
# plt.imshow(adversary.target, cmap='Greys_r')
# plt.show()
# np.save('adv_img', adversary.target)
else:
print('attack failed, original_label=%d, count=%d' %
(data[0][1], total_count))
if total_count >= TOTAL_NUM:
print(
"[TEST_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f"
% (fooling_count, total_count,
float(fooling_count) / total_count))
break
print("jsma attack done")
if __name__ == '__main__':
import paddle
paddle.enable_static()
main()
| apache-2.0 |
timtammittee/thorns | tests/test_map.py | 1 | 4729 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
__author__ = "Marek Rudnicki"
import tempfile
import shutil
import pytest
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas.util.testing import assert_frame_equal
import thorns as th
def square(x):
return x**2
def multiply(x,y):
return x*y
@pytest.fixture(scope="function")
def workdir(request):
workdir = tempfile.mkdtemp()
def fin():
print("Removing temp dir: {}".format(workdir))
shutil.rmtree(workdir, ignore_errors=True)
request.addfinalizer(fin)
return workdir
def test_map_serial(workdir):
space = [{'x': i} for i in range(10)]
results = th.util.map(
square,
space,
backend='serial',
cache='no',
workdir=workdir,
)
expected = pd.DataFrame(
{
'x': range(10),
0: np.arange(10)**2
}
).set_index('x')
assert_frame_equal(results, expected)
def test_map_cache(workdir):
data = np.arange(10)
dicts = [{'x':i} for i in data]
df1 = th.util.map(
square,
dicts,
backend='serial',
cache='yes',
workdir=workdir,
)
df2 = th.util.map(
square,
dicts,
backend='serial',
cache='yes',
workdir=workdir,
)
assert_frame_equal(df1, df2)
def test_map_kwargs(workdir):
space = [{'x': i} for i in range(10)]
kwargs = {'y': 2}
results = th.util.map(
multiply,
space,
backend='serial',
cache='no',
workdir=workdir,
kwargs=kwargs,
)
expected = pd.DataFrame(
{
'x': range(10),
0: np.arange(10)*2
}
).set_index('x')
assert_frame_equal(results, expected)
def test_map_cache_with_kwargs(workdir):
space = [{'x': i} for i in range(10)]
th.util.map(
multiply,
space,
backend='serial',
cache='yes',
workdir=workdir,
kwargs={'y': 2},
)
# It should *not* recall old results, even thour space is the
# same. It should calculate new results, because kwargs are not
# the same.
results = th.util.map(
multiply,
space,
backend='serial',
cache='yes',
workdir=workdir,
kwargs={'y': 3},
)
expected = pd.DataFrame(
{
'x': range(10),
0: np.arange(10)*3
}
).set_index('x')
assert_frame_equal(results, expected)
def test_map_multiprocessing(workdir):
space = [{'x': i} for i in range(10)]
results = th.util.map(
square,
space,
backend='multiprocessing',
cache='no',
workdir=workdir,
)
expected = pd.DataFrame(
{
'x': range(10),
0: np.arange(10)**2
}
).set_index('x')
assert_frame_equal(results, expected)
@pytest.mark.skipif('True')
def test_map_serial_isolated(workdir):
space = [{'x': i} for i in range(10)]
results = th.util.map(
square,
space,
backend='serial_isolated',
cache='no',
workdir=workdir,
)
expected = pd.DataFrame(
{
'x': range(10),
0: np.arange(10)**2
}
).set_index('x')
assert_frame_equal(results, expected)
@pytest.mark.skipif('True')
def test_ipython_map(workdir):
data = np.arange(10)
dicts = [{'x':i} for i in data]
results1 = th.util.map(
square,
dicts,
backend='ipython',
workdir=workdir
)
results2 = th.util.map(
square,
dicts,
backend='ipython',
workdir=workdir
)
assert_equal(
data**2,
list(results1)
)
assert_equal(
data**2,
list(results2)
)
def test_cache(workdir):
square_cached = th.util.cache(square, workdir=workdir)
square_cached(x=2)
result = square_cached(x=2)
assert_equal(result, 4)
def test_dict_of_lists():
dict_of_lists = {
'x': [1,2,3],
'y': [4,5]
}
actual = th.util.map(
multiply,
dict_of_lists,
cache='no',
backend='serial',
)
list_of_dicts = [
{'x': 1, 'y': 4},
{'x': 2, 'y': 4},
{'x': 3, 'y': 4},
{'x': 1, 'y': 5},
{'x': 2, 'y': 5},
{'x': 3, 'y': 5},
]
expected = th.util.map(
multiply,
list_of_dicts,
cache='no',
backend='serial',
)
assert_frame_equal(actual, expected)
if __name__ == '__main__':
    test_ipython_map(tempfile.mkdtemp())  # the pytest fixture is unavailable here, so pass a scratch dir explicitly
| gpl-3.0 |
rickdberg/database | table_updater.py | 1 | 1035 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 6 13:03:56 2017
@author: rickdberg
"""
import pandas as pd
import MySQLdb
from sqlalchemy import create_engine
# Connect to database
user = 'root'
passwd = 'neogene227'
host = '127.0.0.1'
db = 'iodp_compiled'
conctable = 'iw_all'
portable = 'mad_all'
isotopetable = 'mg_isotopes'
con = MySQLdb.connect(user=user, passwd=passwd, host=host, db=db)
cursor = con.cursor()
# Download entire table from database
sql = """SELECT * FROM {}; """.format(isotopetable)
db_data = pd.read_sql(sql, con)
db_data = db_data.iloc[:,:18]
# Load data to be updated
new_data = pd.read_csv(r"C:\Users\rickdberg\Documents\UW Projects\Magnesium uptake\Data\dMg\batch_1_final_for_db.csv")
# Alter data
combo_data = pd.merge(db_data, new_data, how = 'outer', on = 'lab_id')
combo_data = combo_data.sort_values(by = 'dmg_index')
engine = create_engine("mysql://root:neogene227@localhost/iodp_compiled")
combo_data.to_sql(name='mg_isotopes_copy', flavor='mysql', con=engine, if_exists='replace')
# eof
| mit |
maaskola/GPy | GPy/plotting/matplot_dep/base_plots.py | 8 | 5445 | # #Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
try:
#import Tango
from matplotlib import pyplot as pb
except:
pass
import numpy as np
def ax_default(fignum, ax):
if ax is None:
fig = pb.figure(fignum)
ax = fig.add_subplot(111)
else:
fig = ax.figure
return fig, ax
def meanplot(x, mu, color='#3300FF', ax=None, fignum=None, linewidth=2,**kw):
_, axes = ax_default(fignum, ax)
return axes.plot(x,mu,color=color,linewidth=linewidth,**kw)
def gpplot(x, mu, lower, upper, edgecol='#3300FF', fillcol='#33CCFF', ax=None, fignum=None, **kwargs):
_, axes = ax_default(fignum, ax)
mu = mu.flatten()
x = x.flatten()
lower = lower.flatten()
upper = upper.flatten()
plots = []
#here's the mean
plots.append(meanplot(x, mu, edgecol, axes))
#here's the box
kwargs['linewidth']=0.5
if not 'alpha' in kwargs.keys():
kwargs['alpha'] = 0.3
plots.append(axes.fill(np.hstack((x,x[::-1])),np.hstack((upper,lower[::-1])),color=fillcol,**kwargs))
#this is the edge:
plots.append(meanplot(x, upper,color=edgecol,linewidth=0.2,ax=axes))
plots.append(meanplot(x, lower,color=edgecol,linewidth=0.2,ax=axes))
return plots
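# Minimal usage sketch (synthetic numbers; a GP posterior would normally supply
# mu and the 95% band):
#
#     X = np.linspace(0, 1, 100)
#     mu = np.sin(2 * np.pi * X)
#     sd = 0.1 * np.ones_like(X)
#     gpplot(X, mu, mu - 1.96 * sd, mu + 1.96 * sd)
#
# The helper draws the mean line, a filled band between lower and upper, thin
# edge lines, and returns the list of matplotlib artists.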
def gperrors(x, mu, lower, upper, edgecol=None, ax=None, fignum=None, **kwargs):
_, axes = ax_default(fignum, ax)
mu = mu.flatten()
x = x.flatten()
lower = lower.flatten()
upper = upper.flatten()
plots = []
if edgecol is None:
edgecol='#3300FF'
if not 'alpha' in kwargs.keys():
kwargs['alpha'] = 1.
if not 'lw' in kwargs.keys():
kwargs['lw'] = 1.
plots.append(axes.errorbar(x,mu,yerr=np.vstack([mu-lower,upper-mu]),color=edgecol,**kwargs))
plots[-1][0].remove()
return plots
def removeRightTicks(ax=None):
ax = ax or pb.gca()
for i, line in enumerate(ax.get_yticklines()):
if i%2 == 1: # odd indices
line.set_visible(False)
def removeUpperTicks(ax=None):
ax = ax or pb.gca()
for i, line in enumerate(ax.get_xticklines()):
if i%2 == 1: # odd indices
line.set_visible(False)
def fewerXticks(ax=None,divideby=2):
ax = ax or pb.gca()
ax.set_xticks(ax.get_xticks()[::divideby])
def align_subplots(N,M,xlim=None, ylim=None):
"""make all of the subplots have the same limits, turn off unnecessary ticks"""
#find sensible xlim,ylim
if xlim is None:
xlim = [np.inf,-np.inf]
for i in range(N*M):
pb.subplot(N,M,i+1)
xlim[0] = min(xlim[0],pb.xlim()[0])
xlim[1] = max(xlim[1],pb.xlim()[1])
if ylim is None:
ylim = [np.inf,-np.inf]
for i in range(N*M):
pb.subplot(N,M,i+1)
ylim[0] = min(ylim[0],pb.ylim()[0])
ylim[1] = max(ylim[1],pb.ylim()[1])
for i in range(N*M):
pb.subplot(N,M,i+1)
pb.xlim(xlim)
pb.ylim(ylim)
if (i)%M:
pb.yticks([])
else:
removeRightTicks()
if i<(M*(N-1)):
pb.xticks([])
else:
removeUpperTicks()
def align_subplot_array(axes,xlim=None, ylim=None):
"""
    Make all of the axes in the array have the same limits, turn off unnecessary ticks
use pb.subplots() to get an array of axes
"""
#find sensible xlim,ylim
if xlim is None:
xlim = [np.inf,-np.inf]
for ax in axes.flatten():
xlim[0] = min(xlim[0],ax.get_xlim()[0])
xlim[1] = max(xlim[1],ax.get_xlim()[1])
if ylim is None:
ylim = [np.inf,-np.inf]
for ax in axes.flatten():
ylim[0] = min(ylim[0],ax.get_ylim()[0])
ylim[1] = max(ylim[1],ax.get_ylim()[1])
N,M = axes.shape
for i,ax in enumerate(axes.flatten()):
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if (i)%M:
ax.set_yticks([])
else:
removeRightTicks(ax)
if i<(M*(N-1)):
ax.set_xticks([])
else:
removeUpperTicks(ax)
def x_frame1D(X,plot_limits=None,resolution=None):
"""
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
"""
assert X.shape[1] ==1, "x_frame1D is defined for one-dimensional inputs"
if plot_limits is None:
xmin,xmax = X.min(0),X.max(0)
xmin, xmax = xmin-0.2*(xmax-xmin), xmax+0.2*(xmax-xmin)
elif len(plot_limits)==2:
xmin, xmax = plot_limits
else:
raise ValueError("Bad limits for plotting")
Xnew = np.linspace(xmin,xmax,resolution or 200)[:,None]
return Xnew, xmin, xmax
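# Example (illustrative): for 1D inputs X with shape (N, 1),
#
#     Xnew, xmin, xmax = x_frame1D(X, resolution=200)
#
# gives 200 evenly spaced prediction locations of shape (200, 1), padded by 20%
# of the data range on each side, together with the limits that were used.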
def x_frame2D(X,plot_limits=None,resolution=None):
"""
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
"""
assert X.shape[1] ==2, "x_frame2D is defined for two-dimensional inputs"
if plot_limits is None:
xmin,xmax = X.min(0),X.max(0)
xmin, xmax = xmin-0.2*(xmax-xmin), xmax+0.2*(xmax-xmin)
elif len(plot_limits)==2:
xmin, xmax = plot_limits
else:
raise ValueError("Bad limits for plotting")
resolution = resolution or 50
xx,yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]
Xnew = np.vstack((xx.flatten(),yy.flatten())).T
return Xnew, xx, yy, xmin, xmax
| bsd-3-clause |
andnovar/ggplot | ggplot/themes/theme_seaborn.py | 12 | 8893 | from .theme import theme
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy import isreal
class theme_seaborn(theme):
"""
Theme for seaborn.
Copied from mwaskom's seaborn:
https://github.com/mwaskom/seaborn/blob/master/seaborn/rcmod.py
Parameters
----------
style: whitegrid | darkgrid | nogrid | ticks
Style of axis background.
context: notebook | talk | paper | poster
Intended context for resulting figures.
gridweight: extra heavy | heavy | medium | light
        Width of the grid lines. If None, a default based on the context is used.
"""
def __init__(self, style="whitegrid", gridweight=None, context="notebook"):
super(theme_seaborn, self).__init__(complete=True)
self.style = style
self.gridweight = gridweight
self.context = context
self._set_theme_seaborn_rcparams(self._rcParams, self.style, self.gridweight, self.context)
def _set_theme_seaborn_rcparams(self, rcParams, style, gridweight, context):
"""helper method to set the default rcParams and other theming relevant
things
"""
# select grid line width:
gridweights = {
'extra heavy': 1.5,
'heavy': 1.1,
'medium': 0.8,
'light': 0.5,
}
if gridweight is None:
if context == "paper":
glw = gridweights["medium"]
else:
glw = gridweights['extra heavy']
elif isreal(gridweight):
glw = gridweight
else:
glw = gridweights[gridweight]
if style == "darkgrid":
lw = .8 if context == "paper" else 1.5
ax_params = {"axes.facecolor": "#EAEAF2",
"axes.edgecolor": "white",
"axes.linewidth": 0,
"axes.grid": True,
"axes.axisbelow": True,
"grid.color": "w",
"grid.linestyle": "-",
"grid.linewidth": glw}
elif style == "whitegrid":
lw = 1.0 if context == "paper" else 1.7
ax_params = {"axes.facecolor": "white",
"axes.edgecolor": "#CCCCCC",
"axes.linewidth": lw,
"axes.grid": True,
"axes.axisbelow": True,
"grid.color": "#DDDDDD",
"grid.linestyle": "-",
"grid.linewidth": glw}
elif style == "nogrid":
ax_params = {"axes.grid": False,
"axes.facecolor": "white",
"axes.edgecolor": "black",
"axes.linewidth": 1}
elif style == "ticks":
ticksize = 3. if context == "paper" else 6.
tickwidth = .5 if context == "paper" else 1
            ax_params = {"axes.grid": False,
                         "axes.facecolor": "white",
                         "axes.edgecolor": "black",
                         "axes.linewidth": 1,
                         "xtick.direction": "out",
                         "ytick.direction": "out",
                         "xtick.major.width": tickwidth,
                         "ytick.major.width": tickwidth,
                         "xtick.minor.width": tickwidth,
                         "ytick.minor.width": tickwidth,
                         "xtick.major.size": ticksize,
                         "xtick.minor.size": ticksize / 2,
                         "ytick.major.size": ticksize,
                         "ytick.minor.size": ticksize / 2}
rcParams.update(ax_params)
# Determine the font sizes
if context == "talk":
font_params = {"axes.labelsize": 16,
"axes.titlesize": 19,
"xtick.labelsize": 14,
"ytick.labelsize": 14,
"legend.fontsize": 13,
}
elif context == "notebook":
font_params = {"axes.labelsize": 11,
"axes.titlesize": 12,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"legend.fontsize": 10,
}
elif context == "poster":
font_params = {"axes.labelsize": 18,
"axes.titlesize": 22,
"xtick.labelsize": 16,
"ytick.labelsize": 16,
"legend.fontsize": 16,
}
elif context == "paper":
font_params = {"axes.labelsize": 8,
"axes.titlesize": 12,
"xtick.labelsize": 8,
"ytick.labelsize": 8,
"legend.fontsize": 8,
}
rcParams.update(font_params)
# Set other parameters
rcParams.update({
"lines.linewidth": 1.1 if context == "paper" else 1.4,
"patch.linewidth": .1 if context == "paper" else .3,
"xtick.major.pad": 3.5 if context == "paper" else 7,
"ytick.major.pad": 3.5 if context == "paper" else 7,
})
# # Set the constant defaults
# mpl.rc("font", family=font)
# mpl.rc("legend", frameon=False, numpoints=1)
# mpl.rc("lines", markeredgewidth=0, solid_capstyle="round")
# mpl.rc("figure", figsize=(8, 5.5))
# mpl.rc("image", cmap="cubehelix")
rcParams["timezone"] = "UTC"
# rcParams["lines.linewidth"] = "1.0"
# rcParams["lines.antialiased"] = "True"
# rcParams["patch.linewidth"] = "0.5"
# rcParams["patch.facecolor"] = "348ABD"
# rcParams["patch.edgecolor"] = "#E5E5E5"
rcParams["patch.antialiased"] = "True"
rcParams["font.family"] = "sans-serif"
rcParams["font.size"] = "12.0"
rcParams["font.serif"] = ["Times", "Palatino", "New Century Schoolbook",
"Bookman", "Computer Modern Roman",
"Times New Roman"]
rcParams["font.sans-serif"] = ["Helvetica", "Avant Garde",
"Computer Modern Sans serif", "Arial"]
# rcParams["axes.facecolor"] = "#E5E5E5"
# rcParams["axes.edgecolor"] = "bcbcbc"
# rcParams["axes.linewidth"] = "1"
# rcParams["axes.grid"] = "True"
# rcParams["axes.titlesize"] = "x-large"
# rcParams["axes.labelsize"] = "large"
# rcParams["axes.labelcolor"] = "black"
# rcParams["axes.axisbelow"] = "True"
rcParams["axes.color_cycle"] = ["#333333", "348ABD", "7A68A6", "A60628",
"467821", "CF4457", "188487", "E24A33"]
# rcParams["grid.color"] = "white"
# rcParams["grid.linewidth"] = "1.4"
# rcParams["grid.linestyle"] = "solid"
# rcParams["xtick.major.size"] = "0"
# rcParams["xtick.minor.size"] = "0"
# rcParams["xtick.major.pad"] = "6"
# rcParams["xtick.minor.pad"] = "6"
# rcParams["xtick.color"] = "#7F7F7F"
# rcParams["xtick.direction"] = "out" # pointing out of axis
# rcParams["ytick.major.size"] = "0"
# rcParams["ytick.minor.size"] = "0"
# rcParams["ytick.major.pad"] = "6"
# rcParams["ytick.minor.pad"] = "6"
# rcParams["ytick.color"] = "#7F7F7F"
# rcParams["ytick.direction"] = "out" # pointing out of axis
rcParams["legend.fancybox"] = "True"
rcParams["figure.figsize"] = "11, 8"
rcParams["figure.facecolor"] = "1.0"
rcParams["figure.edgecolor"] = "0.50"
rcParams["figure.subplot.hspace"] = "0.5"
def apply_theme(self, ax):
""""Styles x,y axes to appear like ggplot2
Must be called after all plot and axis manipulation operations have
been carried out (needs to know final tick spacing)
From: https://github.com/wrobstory/climatic/blob/master/climatic/stylers.py
"""
#Remove axis border
for child in ax.get_children():
if isinstance(child, mpl.spines.Spine):
child.set_alpha(0)
#Restyle the tick lines
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markersize(5)
line.set_markeredgewidth(1.4)
#Only show bottom left ticks
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
#Set minor grid lines
ax.grid(True, 'minor', color='#F2F2F2', linestyle='-', linewidth=0.7)
if not isinstance(ax.xaxis.get_major_locator(), mpl.ticker.LogLocator):
ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
if not isinstance(ax.yaxis.get_major_locator(), mpl.ticker.LogLocator):
ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
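# Illustrative usage with ggplot (dataset and aesthetics are placeholders):
#
#     from ggplot import ggplot, aes, geom_point, mtcars
#     p = ggplot(aes(x='wt', y='mpg'), data=mtcars) + geom_point()
#     print(p + theme_seaborn(style='darkgrid', context='talk'))
#
# Since the constructor passes complete=True to the base theme, this is treated
# as a complete theme rather than a set of tweaks on top of the current one.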
| bsd-2-clause |
chrisjdavie/Olympics_redo | Scrapy_spider/build_medals_table/Pandas_to_MySql_db.py | 1 | 4273 | '''
Old code, doesn't really work anymore.
Created on 24 Oct 2014
@author: chris
'''
from sqlalchemy.orm.session import sessionmaker
def main():
import sqlalchemy as sa
conn_str = "mysql://chris:pythonaccess@localhost/winter_olympics"
engine = sa.create_engine(conn_str,echo=True)
# metadata = sa.MetaData(engine)
Session = sessionmaker(bind=engine)
sess = Session()
for a in sess.query(Dates):
print a.id, a.year
for a in sess.query(Country):
print a.id, a.name
# raw_input()
from os import listdir
pickle_files = [ f for f in listdir('.') if f[-2:] == '.p' ]
for fname in pickle_files[:1]:
year = int(fname[:-2])
year_q = sess.query(sa.exists().where(Dates.year == year)).scalar()
print year_q
if not year_q:
this_year = Dates(year=year)
sess.add(this_year)
sess.commit()
year_id = this_year.id
# raw_input()
import pickle
with open(fname) as f:
medals_table = pickle.load(f)
print len(medals_table)
raw_input()
for rank in medals_table:
country_name = rank.name
country_q = sess.query(sa.exists().where(Country.name == country_name)).scalar()
if not country_q:
country = Country(name=country_name)
sess.add(country)
sess.commit()
county_id = sess.query(Country.id).filter(Country.name==country_name).first()
rank_tmp = Results(country_id=county_id,year_id=year_id,rank=rank.rank,golds=rank.golds,silvers=rank.silvers,bronzes=rank.bronzes)
''''So, this is where I got to. Put this in the db, check the db is working, and it's all golden?'''
            sess.add(rank_tmp)  # was sess.add(country), which is undefined when the country already exists
sess.commit()
for a in sess.query(Dates):
print a.id, a.year
for a in sess.query(Country):
print a.id, a.name
sess.query(Dates).delete()
sess.query(Country).delete()
sess.query(Results).delete()
sess.commit()
# import pickle
# with open(fname) as f:
# yr_medals=pickle.load(f)
#
# print yr_medals[0].name
#
# # from pandas.io import sql
# # import MySQLdb
# #
# # con = MySQLdb.connect(user="chris",passwd="pythonaccess")
# import sqlalchemy as sa
# print sa.__version__
#
# conn_str = "mysql://chris:pythonaccess@localhost/winter_olympics"
# engine = sa.create_engine(conn_str,echo=True)
# metadata = sa.MetaData(engine)
# # conn = engine.connect()
# metadata.reflect(engine)
#
# print metadata.tables.keys()
#
# Session = sessionmaker(bind=engine)
# sess = Session()
# print 'sam'
# bob = sess.query(sa.exists().where(Country.name == 'United States')).scalar() #Country).filter_by(name='can').exists().scalar()
# print bob
'''Add a row'''
# US_country = Country(name='United States')
# sess.add(US_country)
# sess.commit()
'''Create table line'''
# Country.metadata.create_all(engine)
    '''Create db lines (requires appropriate privileges, did it but don't know which line did that)'''
# conn.execute("commit")
# conn.execute("create database winter_olympics")
# conn.close()
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
from sqlalchemy import Column, Integer, String
class Country(Base):
__tablename__ = 'country'
id = Column(Integer,primary_key=True)
name = Column(String(32))
class Results(Base):
__tablename__ = 'results'
id = Column(Integer,primary_key=True)
country_id = Column(Integer)
year_id = Column(Integer)
rank = Column(Integer)
golds = Column(Integer)
silvers = Column(Integer)
bronzes = Column(Integer)
class Dates(Base):
__tablename__ = 'dates'
id = Column(Integer,primary_key=True)
year = Column(Integer)
if __name__ == '__main__':
main() | mit |
ElDeveloper/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/test_msgpack/test_unpack.py | 9 | 1993 | from io import BytesIO
import sys
from pandas.msgpack import Unpacker, packb, OutOfData, ExtType
import pandas.util.testing as tm
import nose
class TestUnpack(tm.TestCase):
def test_unpack_array_header_from_file(self):
f = BytesIO(packb([1, 2, 3, 4]))
unpacker = Unpacker(f)
assert unpacker.read_array_header() == 4
assert unpacker.unpack() == 1
assert unpacker.unpack() == 2
assert unpacker.unpack() == 3
assert unpacker.unpack() == 4
self.assertRaises(OutOfData, unpacker.unpack)
def test_unpacker_hook_refcnt(self):
if not hasattr(sys, 'getrefcount'):
raise nose.SkipTest('no sys.getrefcount()')
result = []
def hook(x):
result.append(x)
return x
basecnt = sys.getrefcount(hook)
up = Unpacker(object_hook=hook, list_hook=hook)
assert sys.getrefcount(hook) >= basecnt + 2
up.feed(packb([{}]))
up.feed(packb([{}]))
assert up.unpack() == [{}]
assert up.unpack() == [{}]
assert result == [{}, [{}], {}, [{}]]
del up
assert sys.getrefcount(hook) == basecnt
def test_unpacker_ext_hook(self):
class MyUnpacker(Unpacker):
def __init__(self):
super(MyUnpacker, self).__init__(ext_hook=self._hook,
encoding='utf-8')
def _hook(self, code, data):
if code == 1:
return int(data)
else:
return ExtType(code, data)
unpacker = MyUnpacker()
unpacker.feed(packb({'a': 1}, encoding='utf-8'))
assert unpacker.unpack() == {'a': 1}
unpacker.feed(packb({'a': ExtType(1, b'123')}, encoding='utf-8'))
assert unpacker.unpack() == {'a': 123}
unpacker.feed(packb({'a': ExtType(2, b'321')}, encoding='utf-8'))
assert unpacker.unpack() == {'a': ExtType(2, b'321')}
| gpl-3.0 |
equialgo/scikit-learn | examples/model_selection/plot_nested_cross_validation_iris.py | 19 | 4415 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svr = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "LabelKFold", "LeaveOneOut", "LeaveOneLabelOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svr, param_grid=p_grid, cv=inner_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {0:6f} with std. dev. of {1:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
| bsd-3-clause |
gregmedlock/Medusa | medusa/reconstruct/load_from_file.py | 1 | 11255 | from __future__ import print_function
from __future__ import absolute_import
import medusa
import cobra
import cobra.test
import numpy as np
import csv
import glob
import pickle
import pandas as pd
import math
import copy
import time
import random
import sys
import os
import itertools
import logging
LOGGER = logging.getLogger(__name__)
from sys import getsizeof
from copy import deepcopy
from collections import defaultdict
from warnings import warn
from itertools import chain
from optlang.symbolics import Zero
from optlang.interface import OPTIMAL
import cobra.core.model
from cobra.flux_analysis import sample
from cobra.core.solution import get_solution
from cobra.flux_analysis.sampling import OptGPSampler
from cobra.manipulation.delete import *
from cobra.medium import find_boundary_types
from cobra.flux_analysis import pfba
from cobra.util import solver as sutil
from cobra.core import DictList
from medusa.core.ensemble import Ensemble
from medusa.flux_analysis.flux_balance import optimize_ensemble
from medusa.core.feature import Feature
def parent_attr_of_base_component(base_comp):
"""
Output a string to indicate the parent attribute of the cobra.core object.
Parameters
----------
base_comp : cobra.core object
Ensemble base_component of feature. i.e. cobra reaction, metabolite, or gene
"""
if type(base_comp) == cobra.core.Reaction:
parent_attr = "reactions"
elif type(base_comp) == cobra.core.Metabolite:
parent_attr = "metabolites"
elif type(base_comp) == cobra.core.Gene:
parent_attr = "genes"
else:
raise AttributeError("Only cobra.core.Reaction, cobra.core.Metabolite, and cobra.core.Gene supported for base_component type")
return parent_attr
def batch_load_from_files(model_file_names, identifier='ensemble', batchsize=5, verbose = False):
"""
    Loads a list of models by file name in batches and generates an ensemble object.
    This function is meant to limit how much memory is required to
    generate very large ensembles.
    Parameters
    ----------
    model_file_names : List
        List of model file names (.json or .xml cobra models).
    identifier : String
        Identifier for the resulting ensemble.
    batchsize : Integer
        Total number of models loaded into memory at a time.
    verbose : Boolean
        If True, print progress as each batch of models is added.
"""
total = len(model_file_names)
range_lists = []
iterations = math.ceil(total/batchsize)
fix_last = 0
for i in range(iterations):
start = batchsize*i
stop = batchsize*(1+i)
if stop > total:
stop = total
if len(range(start,stop)) == 1:
fix_last = 1
range_lists.append(list(range(start,stop)))
if fix_last == 1:
range_lists[iterations-1] = [range_lists[iterations-2][batchsize-1]] + range_lists[iterations-1]
del range_lists[iterations-2][batchsize-1]
for range_list in range_lists:
model_list = []
for model_file in [model_file_names[i] for i in range_list]:
if model_file.endswith(".json"):
model = cobra.io.load_json_model(model_file)
elif model_file.endswith(".xml"):
model = cobra.io.read_sbml_model(model_file)
else:
raise AttributeError("Only .json or .xml files supported")
if not isinstance(model, cobra.core.Model):
raise AttributeError("Only cobra.core.Model objects supported")
model_list.append(model)
if range_list[0] == 0:
final_ensemble = medusa.Ensemble(model_list, identifier = identifier)
if verbose == True:
print("Ensemble 1 finished")
else:
new_ensemble = medusa.Ensemble(model_list)
final_ensemble = add_ensembles(final_ensemble,new_ensemble)
del new_ensemble
if verbose == True:
print("Next ensemble finished")
del model_list
del model
return final_ensemble
def add_ensembles(e1,e2,verbose=False):
"""
Adds two ensemble objects together.
Parameters
----------
e1 & e2 : Ensemble Objects
Generated using medusa.Ensemble()
"""
    # Keep references to the inputs; emodel3 starts as a deep copy of e1 and becomes the combined ensemble
emodel1 = e1
emodel2 = e2
emodel3 = copy.deepcopy(e1)
# Add reactions to new base_model: Base_model1 + Base_model2 = base_model3
base_model = copy.deepcopy(emodel1.base_model)
all_reactions = set()
all_reactions = set([rxn.id for rxn in base_model.reactions])
new_reactions = set([rxn.id for rxn in emodel2.base_model.reactions]) - all_reactions
reactions_to_add = [emodel2.base_model.reactions.get_by_id(rxn) for rxn in new_reactions]
base_model.add_reactions(reactions_to_add)
emodel3.base_model = base_model
all_feats = set()
all_feats = set([feat.id for feat in emodel1.features])
new_feats = set()
new_feats = set([feat.id for feat in emodel2.features]) - all_feats
old_feats = set()
old_feats = all_feats - new_feats
# Add new features to base ensemble
for feat_id in new_feats:
feat = emodel2.features.get_by_id(feat_id)
emodel3.features = emodel3.features + [feat]
# Swap feature objects to make consistent across ensemble
emodel3.members = emodel1.members + emodel2.members
em1_feats = set([feat.id for feat in emodel1.features])
em2_feats = set([feat.id for feat in emodel2.features])
for member in emodel2.members:
member_obj = emodel3.members.get_by_id(member.id)
new_feat_dict = dict()
for feat, val in member_obj.states.items():
if feat.id in em1_feats:
base_feat = emodel1.features.get_by_id(feat.id)
new_feat_dict[base_feat] = val
else:
new_feat_dict[feat] = val
member_obj.states = new_feat_dict
# Make feature.base_components consistent with base_model
# This may need to be updated to account for metabolite and gene features
for feat_obj in emodel3.features:
if isinstance(feat_obj.base_component, cobra.core.Reaction):
rxn_base_obj = emodel3.base_model.reactions.get_by_id(feat_obj.base_component.id)
feat_obj.base_component = rxn_base_obj
em1_rxns = set([rxn.id for rxn in emodel1.base_model.reactions])
em2_rxns = set([rxn.id for rxn in emodel2.base_model.reactions])
# Create features for reactions missing from either base_model without an existing feature
missing_rxns = (em2_rxns - em1_rxns) | (em1_rxns - em2_rxns)
exist_feat_ids = set([feat.base_component.id for feat in emodel3.features])
attr_list = ['lower_bound','upper_bound']
states1 = emodel1.features[0].states
states2 = emodel2.features[0].states
for rxn_id in missing_rxns:
if not rxn_id in exist_feat_ids:
for attr_str in attr_list:
if rxn_id in em1_rxns:
attr1 = getattr(getattr(emodel1.base_model, "reactions").get_by_id(rxn_id), attr_str)
else:
attr1 = 0.0
if rxn_id in em2_rxns:
attr2 = getattr(getattr(emodel2.base_model, "reactions").get_by_id(rxn_id), attr_str)
else:
attr2 = 0.0
rxn_from_base = emodel3.base_model.reactions.get_by_id(rxn_id)
feature_id = rxn_from_base.id + '_' + attr_str
states1 = dict.fromkeys(states1, attr1)
states2 = dict.fromkeys(states2, attr2)
states = dict(states1, **states2)
feature = Feature(ensemble=emodel3,\
identifier=feature_id,\
name=rxn_from_base.name,\
base_component=rxn_from_base,\
component_attribute=attr_str,\
states=states)
emodel3.features = emodel3.features + [feature]
if verbose == True:
print("New feature added: " + feature_id)
# Check for new features that need to be added because the base models don't align
# Needs to be generalized to all feature types beyond reactions
ovrlp_rxns = (em1_rxns & em2_rxns) - exist_feat_ids
attr_list = ['lower_bound','upper_bound']
states1 = emodel1.features[0].states
states2 = emodel2.features[0].states
for rxn_id in ovrlp_rxns:
for attr_str in attr_list:
attr1 = getattr(getattr(emodel1.base_model, "reactions").get_by_id(rxn_id), attr_str)
attr2 = getattr(getattr(emodel2.base_model, "reactions").get_by_id(rxn_id), attr_str)
if attr1 != attr2:
rxn_from_base = emodel3.base_model.reactions.get_by_id(rxn_id)
feature_id = rxn_from_base.id + '_' + attr_str
states1 = dict.fromkeys(states1, attr1)
states2 = dict.fromkeys(states2, attr2)
states = dict(states1, **states2)
feature = Feature(ensemble=emodel3,\
identifier=feature_id,\
name=rxn_from_base.name,\
base_component=rxn_from_base,\
component_attribute=attr_str,\
states=states)
emodel3.features = emodel3.features + [feature]
if verbose == True:
print("New feature added: " + feature_id)
# Set feature.states
for feature_obj in emodel3.features:
dict1_zeros = dict.fromkeys(emodel1.features[0].states, 0.0)
dict2_zeros = dict.fromkeys(emodel2.features[0].states, 0.0)
if feature_obj.id in em1_feats:
dict1 = emodel1.features.get_by_id(feature_obj.id).states
elif feature_obj.base_component.id in em1_rxns:
base_comp = feature_obj.base_component
comp_attr = feature_obj.component_attribute
parent_attribute = parent_attr_of_base_component(base_comp)
attr = getattr(getattr(emodel1.base_model, parent_attribute).get_by_id(base_comp.id), comp_attr)
for member in emodel1.members:
dict1_zeros[member.id] = attr
dict1 = dict1_zeros
else:
dict1 = dict1_zeros
if feature_obj.id in em2_feats:
dict2 = emodel2.features.get_by_id(feature_obj.id).states
elif feature_obj.base_component.id in em2_rxns:
base_comp = feature_obj.base_component
comp_attr = feature_obj.component_attribute
attr = getattr(getattr(emodel2.base_model, "reactions").get_by_id(base_comp.id), comp_attr)
for member in emodel2.members:
dict2_zeros[member.id] = attr
dict2 = dict2_zeros
else:
dict2 = dict2_zeros
feature_obj.states = dict(dict1, **dict2)
# Set member.states
for member in emodel3.members:
member.ensemble = emodel3
temp_dict = dict()
for feat in emodel3.features:
temp_dict[feat] = feat.states[member.id]
member.states = temp_dict
return emodel3
| mit |
emon10005/scikit-image | doc/examples/plot_nonlocal_means.py | 7 | 1313 | """
=================================================
Non-local means denoising for preserving textures
=================================================
In this example, we denoise a detail of the astronaut image using the non-local
means filter. The non-local means algorithm replaces the value of a pixel by an
average of a selection of other pixel values: small patches centered on the
other pixels are compared to the patch centered on the pixel of interest, and
the average is performed only for pixels that have patches close to the current
patch. As a result, this algorithm can restore textures well that would be
blurred by other denoising algorithms.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.restoration import denoise_nl_means
astro = img_as_float(data.astronaut())
astro = astro[30:180, 150:300]
noisy = astro + 0.3 * np.random.random(astro.shape)
noisy = np.clip(noisy, 0, 1)
denoise = denoise_nl_means(noisy, 7, 9, 0.08)
fig, ax = plt.subplots(ncols=2, figsize=(8, 4))
ax[0].imshow(noisy)
ax[0].axis('off')
ax[0].set_title('noisy')
ax[1].imshow(denoise)
ax[1].axis('off')
ax[1].set_title('non-local means')
fig.subplots_adjust(wspace=0.02, hspace=0.2,
top=0.9, bottom=0.05, left=0, right=1)
plt.show()
| bsd-3-clause |
pism/pism | site-packages/PISM/invert/listener.py | 1 | 2448 | # Copyright (C) 2011, 2012, 2015, 2018 David Maxwell
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Contains the abstract base class PlotListener for listeners that
create plots of vectors at each iteration.
"""
import PISM.logging
import PISM.vec
def pauseListener(*args):
"""Listener that temporarily halts operation at each iteration waiting for a key press."""
PISM.logging.pause()
class PlotListener(object):
"""Base class for listeners that create plots of vectors at each iteration.
Provides objects for converting :cpp:class:`IceModelVec`'s to ``numpy`` vectors
on processor zero, as well as basic ``matplotlib`` figure management."""
def __init__(self, grid):
self.grid = grid
self.figs = {}
def toproczero(self, *args):
"""Returns a ``numpy`` vector on processor zero corresponding to an :cpp:class:`IceModelVec`.
Takes as input either a single :cpp:class:`IceModelVec` or dictionary of such
vectors and the name of an entry. Returns ``None`` on other processors."""
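        # Usage sketch (the dictionary key here is hypothetical):
        #     v = self.toproczero(data, 'somefield')   # dict-plus-name form
        #     v = self.toproczero(vec)                 # single IceModelVec form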
if len(args) == 2:
data = args[0]
name = args[1]
v = data[name]
else:
v = args[0]
if v is None:
return None
return v.numpy()
def figure(self, name='default'):
"""Returns a ``matplotlib`` figure based on a string name. If the instance has not yet
created a figure with the given name, a new figure is created and associated with the given name."""
fig = self.figs.get(name)
if fig is None:
import matplotlib.pyplot as pp
fig = pp.figure()
self.figs[name] = fig
return fig.number
def __call__(self, solver, itr, data):
raise NotImplementedError()
| gpl-3.0 |
neeraj-kumar/nkpylib | nksift.py | 1 | 60489 | """Various sift utilities
Licensed under the 3-clause BSD License:
Copyright (c) 2011-2014, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys, time, math, os.path, random
from PIL import Image, ImageDraw, ImageColor
from nkutils import log, getListAsStr
try:
import simplejson as json
except:
import json
#TODO add nice command line options
COLORS = ImageColor.colormap.values()
def existsnonzero(fname):
"""Checks if the given file exists and is non-zero"""
try:
if os.stat(fname).st_size > 0: return 1
except Exception:
pass
return 0
def siftfname(imfname, ext=None, dir=None):
"""Returns the sift filename for the given image filename.
Assumes it's in the same directory, unless you specify a dir.
Tries all formats, in this order:
.projected.gz - gzipped projected output
.projected - projected output
.sift.gz - gzipped vlfeat output
.sift - vlfeat output
.key - Lowe's binary output
Or you can specify the extension yourself, either as a string, or a list of strings to try.
Returns a filename, or the empty string if no suitable file found.
Note that we're not actually checking that the file is actually in the right format.
"""
siftdir = dir if dir else os.path.dirname(imfname)
base = os.path.join(siftdir, os.path.basename(imfname).rsplit('.', 1)[0])
# if they didn't specify an extension, check them all from most projected to least
if not ext:
ext = '.projected.gz .projected .sift.gz .sift .key'.split()
# make list of extensions to check
exts = [ext] if isinstance(ext, basestring) else ext
# check each extension
for ext in exts:
if ext[0] != '.':
ext = '.'+ext
fname = base+ext
if existsnonzero(fname):
return fname
# if we're here, then no valid file was found
return ''
def grouper(n, iterable, padvalue=None):
"""Taken from Python's itertools recipes.
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]"""
from itertools import izip, chain, repeat
return izip(*[chain(iterable, repeat(padvalue, n-1))]*n)
class SiftFeat:
"""Keeps track of a single sift descriptor"""
def __init__(self, x, y, scale, orientation, data):
"""Creates a new sift descriptor from all the relevant information"""
# x, y, and scale are all to sub-pixel accuracy
self.x, self.y, self.scale = x, y, scale
# orientation is in radians from -PI to +PI
self.orientation = orientation
# the actual descriptor should all be bytes (0-255)
self.data = data
@classmethod
def fakefeat(cls, val, cache={}, **kw):
"""Makes a fake sift feature for the given val.
This function is memoized in cache, so you'll always get the same feat for the same input val.
You can optionally pass in any of x, y, scale, orientation, data.
Otherwise, they are initialized to:
x: uniform(0, 100)
y: uniform(0, 100)
scale: uniform(0, 10)
orientation: uniform(0, pi)
            data: 128 values of randint(0, 256)
"""
from random import uniform, randint
from math import pi
if val not in cache:
for varname, maxval in zip('x y scale orientation'.split(), [100, 100, 10, math.pi]):
if varname not in kw:
kw[varname] = uniform(0, maxval)
if 'data' not in kw:
kw['data'] = [randint(0,256) for i in range(128)]
ret = cache[val] = cls(**kw)
return cache[val]
@classmethod
def siftFromFile(cls, f, fmt=None):
"""Creates a list of sift features from a given file or filename or vectors.
This tries to do the appropriate format detection.
If f is a string, then we assume it's a filename. We handle:
'.key' files, as dumped by Lowe's binary
'.sift' files, as dumped by VLFeat's sift binary
'.sift.gz' files, compressed versions of .sift files.
'.projected' files, as dumped by projectindivmain()
'.projected.gz' files, compressed versions of .projected files
You can optionally specify the fmt if you know it:
'lowe'
'vlfeat'
'projected'
If f is a file, assumes it's in Lowe's format.
Else, assumes it's a pair of (locs, fvecs).
Returns an empty string or list on error.
"""
if fmt == 'lowe': return cls.siftFromLowe(f)
if fmt == 'vlfeat': return cls.siftFromVLFeat(f)
if fmt == 'projected': return cls.siftFromProjected(f)
if isinstance(f, basestring):
# it's a filename
if f.endswith('.key'): return cls.siftFromLowe(open(f))
if f.endswith('.sift') or f.endswith('.sift.gz'): return cls.siftFromVLFeat(f)
if f.endswith('.projected') or f.endswith('.projected.gz'): return cls.siftFromProjected(f)
elif isinstance(f, file):
# it's a file itself, so assume it's in lowe's format
return cls.siftFromLowe(f)
else:
# it's a list
try:
# see if it's a pair of (locs, fvecs)
ret = [cls(x,y,s,o,fvec) for (x,y,s,o), fvec in zip(*f)]
except Exception:
ret = []
for el in f:
if isinstance(el, cls):
# check if it's already a siftfeat
ret.append(el)
return ret
return []
@classmethod
def siftFromLowe(cls, f):
"""Creates a list of sift features from text output from Lowe's original sift binary"""
if isinstance(f, basestring):
f = open(f)
# read the number of points and the length of each descriptor
num, length = [int(i) for i in f.readline().split()]
# read the rest of it and transform it appropriately
all = ''.join(f.readlines()).replace('\n', ' ').split()
items = grouper(length+4, all)
# now read each feature
feats = []
for item in items:
# the first four correspond to the metadata for that feature
y, x, scale, orientation = [float(i) for i in item[:4]]
# the rest of it corresponds to the actual data for the descriptor
data = [int(i) for i in item[4:]]
feats.append(cls(x, y, scale, orientation, data))
return feats
@classmethod
def siftFromVLFeat(cls, f):
"""Creates a list of sift features from text output from VLFeat's sift binary"""
import gzip
import numpy as np
# if the file is actually a filename, open it first
if isinstance(f, basestring):
# check for gzipped files (since we often gzip the sift files)
f = gzip.open(f) if f.endswith('.gz') else open(f)
# descrips are one-per-line, making it easy to parse
feats = []
for l in f:
els = l.rstrip().split()
x, y, scale, ori = map(float, els[:4])
data = np.array(map(int, els[4:]), dtype=np.float32)
feats.append(cls(x, y, scale, ori, data))
return feats
@classmethod
def siftFromProjected(cls, f):
"""Creates a list of sift features from projected features output.
This is for "extended mode" output, which includes the locations as well.
"""
import gzip
import numpy as np
# if the file is actually a filename, open it first
if isinstance(f, basestring):
# check for gzipped files (since we often gzip the sift files)
f = gzip.open(f) if f.endswith('.gz') else open(f)
# descrips are one-per-line, making it easy to parse
feats = []
for l in f:
els = l.rstrip().split()
x, y, scale, ori = map(float, els[:4])
fnum = int(els[4])
#data = np.array(map(int, els[4:]), dtype=np.float32)
feats.append(cls(x, y, scale, ori, [fnum]))
return feats
@classmethod
def siftsFromVocab(cls, f, fnames=None):
"""Loads projected sift (or other) features from the given file.
The file can be a filename (and optionally compressed).
If 'fnames' is None (default), loads all lines of the file.
Else, fnames can contain a list of:
strings - assumes each is a filename (first 1st col of file)
ints - line numbers to read (prunes out 1st col of file)
Returns a dict mapping filenames to lists of ints.
"""
import gzip
if isinstance(f, basestring):
f = gzip.open(f) if f.endswith('.gz') else open(f)
tomatch = set(fnames) if fnames else None
ret = {}
for i, l in enumerate(f):
fname, fvec = l.rstrip('\n').split('\t', 1)
if tomatch and i not in tomatch and fname not in tomatch: continue
ret[fname] = map(int, fvec.split())
return ret
def __repr__(self):
"""Reproducible description (vlsift format)"""
from nkutils import getListAsStr
return '%s %s %s %s %s' % (self.x, self.y, self.scale, self.orientation, getListAsStr(map(int,self.data), ' '))
def __str_disabled__(self):
"""Returns simple descrip"""
return "x: %f, y: %f, scale: %f, orientation: %f, start of data: %d %d, length of data: %d" % (self.x, self.y, self.scale, self.orientation, self.data[0], self.data[1], len(self.data))
def dist(self, other):
"""Returns some measure of distance between the descriptors of self and other.
Currently this is square of simple euclidean (L2) distance.
Simple test example with length-3 descriptors:
>>> a = SiftFeat(1, 2, 3, 4, [0, 40, 0])
>>> b = SiftFeat(1, 2, 3, 4, [30, 0, 0])
>>> a.dist(b)
2500.0
>>> b.dist(a)
2500.0
"""
from math import sqrt
ret = 0.0
for s, o in zip(self.data, other.data):
ret += (s-o)*(s-o)
return ret
def findClose(self, seq, **kw):
"""Finds the closest descriptors from self to the given seq.
The kw are passed to filternnresults (prominently, k and r)
"""
from nkutils import simplenn, filternnresults
import numpy as np
if not isinstance(seq, np.ndarray):
seq = np.array([o.data for o in seq])
#log(' Got seq of type %s, shape %s, and data shape %s and data %s' % (type(seq), seq.shape, type(self.data), self.data))
# deal with degenerate data (happens with projected inputs)
if isinstance(self.data, list) or len(self.data.shape) == 0:
self.data = np.array([self.data])
if len(seq.shape) == 1:
seq = seq.reshape((len(seq), 1))
#log(' Got seq of type %s, shape %s, and data shape %s' % (type(seq), seq.shape, self.data.shape))
#raise NotImplementedError
dists = simplenn(seq, self.data)
kw.setdefault('sort', 1)
dists = filternnresults(dists, **kw)
return dists
def drawOnImage(self, im, color="white", minsize=1, drawline=1, fill=0):
"""Draws this descriptor onto the given image (destructive)"""
draw = ImageDraw.Draw(im)
# draw a circle to represent this descriptor
s = max(self.scale*1, 1) # *6 for equivalent to sift binary
if s < minsize: return
bbox = (self.x-s, self.y-s, self.x+s, self.y+s)
bbox = [int(i) for i in bbox]
if fill:
draw.ellipse(bbox, fill=color)
else:
draw.arc(bbox, 0, 360, fill=color)
# draw a line to show what direction it's in
if not drawline: return
s = max(s, 6)
dir = [s*i for i in [math.cos(self.orientation), -math.sin(self.orientation)]]
draw.line([self.x, self.y, self.x+dir[0], self.y+dir[1]], fill=color)
return im
def getImage(self):
"""Returns an image which represents the descriptor"""
im = Image.new('RGB', (19, 11), (0, 0, 0))
# each group of 8 in the descriptor corresponds to an orientation histogram
vals = list(grouper(8, self.data))
cur = 0
# go through each cell and use its histogram to draw the image
for yloc in range(4):
y = yloc * 3
for xloc in range(4):
x = xloc * 5
# the first row of this cell contains the first 4 orientation bins
for i, v in enumerate(vals[cur][:4]):
color = (v, 0, 255-v)
#print "Putting bin %d at %d, %d" % (i+cur*8, x+i, y)
im.putpixel((x+i, y), color)
# the second row contains the latter 4 orientation bins
for i, v in enumerate(vals[cur][4:]):
color = (v, 0, 255-v)
#print "Putting bin %d at %d, %d" % (4+i+cur*8, x+i, y)
im.putpixel((x+i, y+1), color)
cur += 1
return im
def getDiffImage(self, other):
"""Returns an image which represents the difference between two descriptors"""
# get the two images and then take their difference
from PIL import ImageChops
im1, im2 = self.getImage(), other.getImage()
ret = ImageChops.difference(im1, im2)
return ret
def getDiffGraph(self, other):
"""Returns a graph which is the difference between two descriptors"""
w, h = 512, 64
im = Image.new('RGB', (w, h), (255, 255, 255))
draw = ImageDraw.Draw(im)
vals1 = [int(i) for i in self.data]
vals2 = [int(i) for i in other.data]
difs = [i-j for i, j in zip(vals1, vals2)]
# draw the baseline
draw.line((0, h/2, w-1, h/2), fill="black")
lines = []
for i, v in enumerate(difs):
x = i*4
y = h - 1 - int(v*h/512) - h/2
lines.append((x, y))
# draw them all
#print "Drawing %s for %s" % (lines, difs)
draw.line(lines, fill="black")
return im
def getGraph(self):
"""Returns a graph which represents the descriptor"""
w, h = 24, 16
im = Image.new('RGB', (w*4, (h+1)*4), (255, 255, 255))
draw = ImageDraw.Draw(im)
# each group of 8 in the descriptor corresponds to an orientation histogram
vals = list(grouper(8, self.data))
cur = 0
# go through each cell and draw its histogram
for yloc in range(4):
y = (yloc+1) * (h+1) - 1
for xloc in range(4):
x = xloc * w
# collect the points in this histogram
lines = []
for i, v in enumerate(vals[cur]):
curx = 3*i + 1 + x
cury = y - int(v*h/256)
lines.append((curx, cury))
# draw them all
#print "Drawing %s for %s" % (lines, vals[cur])
draw.line(lines, fill="black")
cur += 1
return im
def getOverallGraph(self):
"""Returns a single graph which represents the descriptor"""
w, h = 512, 32
im = Image.new('RGB', (w, h), (255, 255, 255))
draw = ImageDraw.Draw(im)
vals = [int(i) for i in self.data]
lines = []
for i, v in enumerate(vals):
x = i*4
y = h - 1 - int(v*h/256)
lines.append((x, y))
# draw them all
#print "Drawing %s for %s" % (lines, vals)
draw.line(lines, fill="black")
return im
class Path:
"""A path is a simple struct that takes into account a point appearing and moving"""
def __init__(self, imagenum, descrip):
"""Creates a path that starts with the given image number and index within that image"""
self.imagenums = [imagenum]
self.descrips = [descrip]
self.active = 1
self.color = random.choice(COLORS)
def findNext(self, descrips, imagenum):
"""Finds the closest descriptor from the given ones"""
# if we're inactive, return
if not self.active: return -1
last = self.descrips[-1]
close = last.findClose(descrips)
# if the first one is very far away, assume it's gone
n = close[0][1]
dist = math.sqrt((last.x-descrips[n].x)**2 + (last.y-descrips[n].y)**2)
if dist > 10:
self.active = 0
return -1
# otherwise, assume it's a good match
self.imagenums.append(imagenum)
self.descrips.append(descrips[n])
return n
def __repr__(self):
"""Returns the list of imagenums for this path"""
return "%s" % (self.imagenums)
def mainloop(path):
"""Runs the mainloop"""
# read all the sift files
fnames = [f for f in os.listdir(path) if f.endswith('.key')]
fnames.sort()
sifts = []
print "Reading sift descriptors"
for fname in fnames:
sifts.append(SiftFeat.siftFromFile(open("%s%s" % (path, fname))))
print "Read %d key files. Initializing paths" % (len(sifts))
paths = []
for n, descrip in enumerate(sifts[0]):
paths.append(Path(0, descrip))
# now go through and trace paths by following points and seeing where similar descriptors move to
for imagenum, s in enumerate(sifts[1:]):
print "On image %d with %d descrips" % (imagenum+1, len(s))
cur = s[:]
# first check all paths
found = 0
for n, p in enumerate(paths):
index = p.findNext(cur, imagenum+1)
if index >= 0:
# delete this index from cur
cur.pop(index)
found += 1
print " Found %d out of %d paths searched\r" % (found, n),
# now that we have removed all points that correspond to existing paths, add the rest as new paths
for n, descrip in enumerate(cur):
paths.append(Path(imagenum+1, descrip))
# now print out the paths
print "Paths:"
for p in paths:
print p
def findPaths():
"""Finds coherent paths"""
path = './'
if len(sys.argv) > 1:
path = sys.argv[1]
mainloop(path)
def testDraw(siftPath, imgPath, outPath="./"):
"""Tests out the drawing of descriptors"""
siftFnames = [f for f in os.listdir(siftPath) if f.endswith('.key')]
basenames = [f.rsplit('.')[-2] for f in siftFnames]
imgFnames = ["%s%s.png" % (imgPath, f) for f in basenames]
for siftname, imgname, basename in zip(siftFnames, imgFnames, basenames):
descrips = SiftFeat.siftFromFile(open("%s%s" % (siftPath, siftname)))
print "Opened %s with %d descriptors" % (siftname, len(descrips))
im = Image.open(imgname).convert('RGB')
for i, d in enumerate(descrips):
c = "white" #COLORS[i%len(COLORS)]
d.drawOnImage(im, c, minsize=3, drawline=0)
im.save('%s%s_out.png' % (outPath, basename))
def extract(fnames, checkexist=1, compress=1):
"""Extracts sift for the given filenames.
The outputs are stored to {filename without ext}.sift if compress is 0,
or with .gz appended if compress is 1.
Yields list of output filenames, in same order as input.
If checkexist is true (default), then first sees if the output exists and is non-zero size."""
from subprocess import Popen, PIPE
vlsiftexe = ['vlsift', '-o', '/dev/stdout', '/dev/stdin']
gzipexe = ['gzip', '-c']
for i, f in enumerate(fnames):
log('On input %d of %d: %s' % (i+1, len(fnames), f))
t1 = time.time()
outfname = f.rsplit('.',1)[0]+'.sift'
if compress:
outfname += '.gz'
done = 0
if checkexist:
if existsnonzero(outfname):
done = 1
log(' Outfile %s already existed...' % (outfname))
if not done:
outf = open(outfname, 'wb')
p1 = Popen(['convert', f, 'pgm:-'], stdout=PIPE)
if compress:
p2 = Popen(vlsiftexe, stdin=p1.stdout, stdout=PIPE)
p3 = Popen(gzipexe, stdin=p2.stdout, stdout=outf)
p1.stdout.close()
p2.stdout.close()
p3.communicate()
else:
p2 = Popen(vlsiftexe, stdin=p1.stdout, stdout=outf)
p1.stdout.close()
p2.communicate()
outf.close()
log(' Took %0.3fs to extract sift feats to %s...' % (time.time()-t1, outfname))
yield outfname
def extractmain(*fnames):
"""A driver that extracts sift matches for filenames given in args.
This calls extract(fnames, checkexist=1, compress=1)
Prints sift output filenames on separate lines, one per input
"""
for outfname in extract(fnames, checkexist=1, compress=1):
print outfname
sys.stdout.flush()
def oldmain():
#findPaths()
if len(sys.argv) < 3:
print "Usage: python %s <sift files path> <image files path>" % (sys.argv[0])
sys.exit()
if sys.argv[1][-1] != '/': sys.argv[1] += '/'
if sys.argv[2][-1] != '/': sys.argv[2] += '/'
testDraw(sys.argv[1], sys.argv[2])
def drawmain(*args):
"""A driver that draws sift points onto images"""
minsize = 2
if len(args) < 3:
print 'Draw args: <vlsift output filename> <img> <out imgfname> [<minsize=%d>]' % (minsize)
sys.exit()
featfname, imgfname, outfname= args[0:3]
try:
minsize = int(args[3])
except Exception: pass
im = Image.open(imgfname)
feats = SiftFeat.siftFromVLFeat(featfname)
# generate color
from nkimageutils import colormap, indexedcolor
for i, f in enumerate(feats):
c = 'white'
#c = colormap(i/float(len(feats)))
c = indexedcolor(i%100, 100)
f.drawOnImage(im, minsize=minsize, color=c)
im.save(outfname)
def oldmatchmain(*args):
"""A driver that finds matches between two sift files"""
import numpy as np
maxratio = 0.8
if len(args) < 4:
print 'Match args: <vlsift outfile 1> <vlsift outfile 2> <outsift1> <outsift2> [<maxratio=%f>]' % (maxratio)
sys.exit()
func = SiftFeat.siftFromVLFeat
f1, f2 = map(func, args[0:2])
print >>sys.stderr, 'Read %d feats in file %s, %d feats in file %s' % (len(f1), args[0], len(f2), args[1])
matches = []
f2data = np.array([f.data for f in f2])
outf1, outf2 = open(args[2], 'wb'), open(args[3], 'wb')
for i, f in enumerate(f1):
if i % 1 == 0:
print >>sys.stderr, ' On point %d of %d, %d matches so far %s... \r' % (i+1, len(f1), len(matches), matches[-1] if matches else []),
d = f.findClose(f2data, k=2)
#print d
if len(d) < 2: continue
ratio = d[0][0]/d[1][0]
if ratio > maxratio: continue
j = d[0][1]
matches.append((i, j))
print >>outf1, f
print >>outf2, f2[j]
outf1.flush()
outf2.flush()
print >>sys.stderr
def match(f1, f2, outfname=None, maxratio=0.8, maxk=5, maxr=None, checkexist=1, *args, **kw):
"""Matches features between two lists of feature vectors and optionally saves to outfname.
The inputs can be in any parseable format.
For each pt in f1, finds closest points in f2.
The outputs are written in tab-separated lines with the following fields:
distance - normalized distance between fvecs
feat number of match in 1st input
feat number of match in 2nd input
full line of 1st input (tabs converted to spaces)
full line of 2nd input (tabs converted to spaces)
Parameters:
maxk: maximum number of neighbors to retrieve
maxr: maximum distance to allow (set to 0 if matching projected points)
maxratio: if >0, then applies the ratio test
        checkexist: if 1 and the output file already exists, the existing matches are read and returned instead of recomputing.
"""
import numpy as np
if isinstance(f1, basestring) and isinstance(f2, basestring):
log('Matching features in %s vs %s to %s with maxratio %s, maxk %s, maxr %s' % (f1, f2, outfname, maxratio, maxk, maxr))
if checkexist and existsnonzero(outfname):
log(' Outfile %s existed already, so returning' % (outfname))
return list(readmatches(outfname))
feats1 = SiftFeat.siftFromFile(f1)
feats2 = SiftFeat.siftFromFile(f2)
#log('%s (%d) vs %s (%d)' % (f1, len(feats1), f2, len(feats2)))
other = np.array([o.data for o in feats2])
matches = []
outf = open(outfname, 'wb') if outfname else None
for i, f1 in enumerate(feats1):
dists = f1.findClose(other, k=maxk, r=maxr)
if not dists: continue
best = dists[0][0]
matchratio = 0 if maxratio and maxratio > 0 else 1
if best == 0:
matchratio = 1
# for ratio test, see if we have any
nonzero = [d for d, j in dists[1:] if d > 0]
if not matchratio and maxratio and maxratio > 0 and nonzero:
ratio = best/float(nonzero[0])
if ratio < maxratio:
matchratio = 1
toadd = []
for mnum, (d, j) in enumerate(dists):
if d == 0 or (matchratio and d == best):
# if it has 0 distance, just add it
# if we're matching ratio, then we add all results with the best score.
toadd.append((d, i, j, f1, feats2[j]))
else:
break
matches.extend(toadd)
if outf:
for m in toadd:
print >>outf, getListAsStr(m, '\t')
#log(' On %d of %d, with data %s, %s best, %d dists, %d toadd, %d matches: %s' % (i+1, len(feats1), f1.data[:2], best, len(dists), len(toadd), len(matches), [(d, j, f2.data[:2]) for d, i, j, f1, f2 in toadd[:3]]))
return matches
def matchmain(f1, f2, outfname, *args):
"""Driver for match().
Checks filenames. Asserts both are of same type.
If projected, then runs with k=5 (or int(args[0])), maxr=0, maxratio=None.
Else, then runs with maxratio=0.8 (or float(args[0])), k=5.
"""
p = '.projected'
proj = p in f1
if proj:
assert p in f2
else:
assert p not in f2
if proj:
try:
k = int(args[0])
except:
k = 5
return match(f1, f2, outfname, maxratio=None, maxk=k, maxr=0)
else:
try:
maxratio = float(args[0])
except:
maxratio = 0.8
return match(f1, f2, outfname, maxratio=maxratio, maxk=5, maxr=None)
def readmatches(f):
"""Parses the given match file and yields (dist, i, j, f1, f2)"""
from gzip import GzipFile
f = GzipFile(f) if f.endswith('.gz') else open(f)
def makefeat(feat):
els = feat.split()
x, y, s, o = map(float, els[:4])
data = map(int, els[4:])
return SiftFeat(x, y, s, o, data)
for l in f:
d, i, j, f1, f2 = l.rstrip('\n').split('\t')
d = float(d)
i, j = int(i), int(j)
f1 = makefeat(f1)
f2 = makefeat(f2)
yield (d, i, j, f1, f2)
def drawmatchesmain(matchesfname, outfname, im1fname, im2fname, minscale=0, orientation='vertical', pointsize=3):
"""Draws matches between images.
Parameters:
matchesfname - text file with matches (as output from match())
outfname - output image filename
im1fname - 1st image of matchpair
im2fname - 2nd image of matchpair
minscale - the minimum "scale" of points to draw matches for
orientation - 'horizontal' or 'vertical' (default)
pointsize - if > 0, then draws circles on each sift point at this radius.
"""
from PIL import Image, ImageDraw
from nkimageutils import ImagePair, randcolor
from nkutils import rectat
im1 = Image.open(im1fname)
im2 = Image.open(im2fname)
ip = ImagePair(im1, im2, orientation=orientation, background=(255,255,255))
minscale = float(minscale)
draw = ImageDraw.Draw(ip.outim)
pairs = []
# first collect pairs, and optionally draw points
for d, i, j, f1, f2 in readmatches(matchesfname):
if f1.scale < minscale and f2.scale < minscale: continue
loc1 = (0, f1.x, f1.y)
loc2 = (1, f2.x, f2.y)
color = randcolor('RGB')
pairs.append((loc1, loc2, color))
if pointsize > 0:
p1 = ip.globalcoords(0, (f1.x, f1.y))
p2 = ip.globalcoords(1, (f2.x, f2.y))
draw.ellipse(rectat(p1[0], p1[1], pointsize*2, pointsize*2), outline=color, fill=color)
draw.ellipse(rectat(p2[0], p2[1], pointsize*2, pointsize*2), outline=color, fill=color)
# draw lines
for loc1, loc2, color in pairs:
ip.drawline([loc1, loc2], fill=color)
ip.outim.save(outfname)
def ransacmain(matchesfname, outfname, minerror=0.5, maxiters=5000):
"""Runs ransac on the given set of matches.
Writes the filtered matches out to the given outfname.
Also prints (to stdout) the homography.
Returns [list of inlier matches, dict of best model].
The best model dict has fields:
inliers: list of inliers, as simple indices
model: the best HomographyModel
error: the error (measured as percentage of inliers)
finalerror: the final error (as reprojection error on only the inliers)
"""
from nkransac import Ransac, HomographyModel
minerror = float(minerror)
matches = list(readmatches(matchesfname))
data = [((f1.x, f1.y), (f2.x, f2.y)) for d, i, j, f1, f2 in matches]
dataidx = dict((d, i) for i, d in enumerate(data))
def callback(niters, elapsed, best, ransac, **kw):
"""Generic callback"""
perc = len(best['inliers'])/float(len(dataidx))
perc = min(perc, 0.8)
ransac.maxiters = min(maxiters, Ransac.estimateNiters(4, perc))
print >>sys.stderr, 'On iter %d (%0.3fs elapsed), best model has %d inliers (%0.3f%%), error %0.2f, new maxiters %d' % (niters, elapsed, len(best['inliers']), perc, best['error'], ransac.maxiters)
r = Ransac(4, HomographyModel, callback=callback, strongthresh=10, weakthresh=30, mininliers=4, minerror=minerror)
try:
ret = r.run(data)
except KeyboardInterrupt:
ret = r.best
# also compute final error
ret['finalerror'] = ret['model'].geterror(ret['inliers'])
outf = open(outfname, 'wb')
toret = [[], ret]
for pair in ret['inliers']:
idx = dataidx[pair]
m = matches[idx]
toret[0].append(m)
print >>outf, getListAsStr(m, '\t')
print 'Error (1-inlier perc): %s, reprojection error: %s, model:\n%s' % (ret['error'], ret['finalerror'], ret['model'])
return toret
def fullsiftmain(im1, im2, vocabname=None, minscale=0, orientation='vertical', minerror=0.5):
"""Does the full sift pipeline for the given two images.
- Extracts sift features for both
- Computes matches, storing the output to '%(im1)s-%(im2)s-matches.txt'
- Draws matches to same fname, but as .png (using minscale)
- Runs ransac on the matches, storing to ...-rmatches.txt
- Draws ransac matches, to same fname but as .png (using minscale)
"""
minscale = float(minscale)
minerror = float(minerror)
siftfnames = list(extract([im1, im2]))
if vocabname:
# project using given vocab
outfnames = projectindiv(vocabname, siftfnames)
siftfnames = outfnames
base = lambda f: os.path.basename(f).rsplit('.',1)[0]
matchbase = '%s-%s-matches' % (base(im1), base(im2))
matchmain(siftfnames[0], siftfnames[1], matchbase+'.txt')
drawmatchesmain(matchbase+'.txt', matchbase+'.png', im1, im2, minscale=minscale, orientation=orientation)
rbase = matchbase.replace('-matches', '-rmatches')
inliermatches, best = ransacmain(matchbase+'.txt', rbase+'.txt', minerror=minerror)
print 'Best error is %s, %d inliers, finalerror %s, model is\n%s' % (best['error'], len(best['inliers']), best['finalerror'], best['model'])
drawmatchesmain(rbase+'.txt', rbase+'.png', im1, im2, minscale=minscale, orientation=orientation)
def match1stmain(userims, googims):
"""Matches userims to google images and returns scores for each userim"""
user = [(f, locs, fvecs) for i, f, locs, fvecs in readfvecs(userims)]
ret = []
for i, f, locs, fvecs in readfvecs(googims):
if fvecs is None or len(fvecs) == 0: continue
for j, (userfname, userlocs, userfvecs) in enumerate(user):
matches = match((locs, fvecs), (userlocs, userfvecs), 'testmatches', maxratio=None, maxk=5, maxr=0)
filtmatches, best = ransacmain('testmatches', 'outransac')
print 'Went from %d matches to %d filtered' % (len(matches), len(filtmatches))
sys.exit()
def readfvecs(fnames, ss=0, nprocs=1, minsize=0):
"""Reads filenames from the given input.
If it's a string, then assumes it's a filename which lists descriptor filenames.
Else, should be a list of descriptor filenames.
The descriptors are read, and can be:
filename - read using GzipFile if ends with .gz, else read directly
list of fvecs
These are converted to an np.array, with dtype=uint8.
Does subsampling of feature vectors based on ss:
<= 0: no subsampling
int: maximum number of fvecs per input
You can also pass in:
minsize: if > 0, descriptors with 'scale' less than this are discarded (only if we know scales)
The method yields (i, f, locs, fvecs) for each input descriptor file.
Locs is a list of (x, y, scale, orientation) float tuples.
This corresponds to the fvecs, which are np.array(dtype=np.uint8)
"""
from gzip import GzipFile
from Queue import Queue
import numpy as np
from nkthreadutils import spawnWorkers
from nkutils import minsample
import cPickle as pickle
if isinstance(fnames, basestring):
fnames = [f.rstrip('\n') for f in open(fnames) if f.rstrip('\n')]
inq, outq = Queue(), Queue(5)
ss = int(ss)
def inqloop():
last = None
while 1:
i, f = inq.get()
if isinstance(f, basestring):
# it's a filename, so read it
ident = f
try:
# pickle files are already fully processed, so just continue after reading it
if f.endswith('.pickle'):
locs, fvecs, allfnames = pickle.load(open(f))
# filter by size
indices = [i for i, (x, y, s, o) in enumerate(locs) if not (0 < s < minsize)]
# subsample as well
if 0 < ss < len(indices):
indices = minsample(indices, ss)
fvecs = fvecs[indices]
locs = locs[indices]
outq.put((i, ident, locs, fvecs))
continue
curf = GzipFile(f) if f.endswith('.gz') else open(f)
fvecs = []
try:
for l in curf:
try:
row = l.strip().split()
x,y,s,o = loc = map(float, row[:4])
if 0 < s < minsize: continue # skip small points
fvec = map(int, row[4:])
fvecs.append((loc,fvec))
except Exception:
pass
except Exception: # this is needed for gzip iteration, which sometimes causes a problem here
pass
log(' Loaded file %d: %s with len %d (ss %s, minsize %s)' % (i, f, len(fvecs), ss, minsize))
except Exception, e:
raise
fvecs = last
else:
                # it must be a list of fvecs already -- see if it has locs
                rows = []
                for row in f:
if len(row) == 2: # (loc, fvec)
(x,y,s,o), fvec = row
if 0 < s < minsize: continue # skip small points
rows.append(row)
else: # fvec only
                        rows.append(((0,0,0,0), row)) # no location known, use a dummy loc
fvecs = rows
ident = i
last = fvecs
# subsample
if ss > 0:
fvecs = minsample(fvecs, ss)
# make nparray
fvecs = [(loc, f) for loc, f in fvecs if len(f) == 128 or len(f) == 1]
if fvecs:
locs, fvecs = zip(*fvecs)
if len(fvecs[0]) == 1:
fvecs = np.array(fvecs, dtype=np.uint32)
else:
fvecs = np.array(fvecs, dtype=np.uint8)
else:
locs = []
fvecs = np.array((), dtype=np.uint8)
# put it on the output queue
outq.put((i, ident, locs, fvecs))
# spawn workers
inworkers = spawnWorkers(nprocs, inqloop)
# send inputs to inqloop, in a separate thread, so that we don't block outputs
def sendinputs():
for i, f in enumerate(fnames):
inq.put((i, f))
sendinputworker = spawnWorkers(1, sendinputs)
# read outputs
t1 = time.time()
ntotal = 0
ndone = 0
while 1:
i, f, locs, fvecs = outq.get()
ntotal += len(fvecs)
ndone += 1
log(' Loaded %d fvecs (%d total) from input %d of %d (%0.2fs elapsed)' % (len(fvecs), ntotal, ndone, len(fnames), time.time()-t1))
yield (i, f, locs, fvecs)
del f, locs, fvecs
if ndone >= len(fnames): break
def aggregatemain(outfname, ss, minsize, *fnames):
"""Aggregates sift features from many files into one pickled file.
This file contains (alllocs, allfvecs, allfnames), which are:
alllocs: a Nx4 numpy array of x,y,scale,orientation values for each point
allfvecs: a Nx128 numpy array of descriptors for each point
allfnames: a list of len N with the filename for each point
"""
import cPickle as pickle
import numpy as np
alllocs = []
allfvecs = []
t1 = time.time()
ss = int(ss)
minsize = float(minsize)
allfnames = []
for i, f, locs, fvecs in readfvecs(fnames, ss=ss, minsize=minsize):
alllocs.append(np.array(locs))
allfvecs.append(np.array(fvecs))
allfnames.extend([f]*len(fvecs))
t2 = time.time()
alllocs = np.vstack(alllocs)
allfvecs = np.vstack(allfvecs)
t3 = time.time()
log('Got final matrices of shape %s (%s), %s (%s) in %0.3fs' % (alllocs.shape, alllocs.dtype, allfvecs.shape, allfvecs.dtype, t3-t1))
pickle.dump((alllocs,allfvecs,allfnames), open(outfname, 'wb'), -1)
def applyrootsift(fvecs):
"""Applies the root sift transformation to a given set of feature vectors.
    This is L1 normalization followed by an element-wise square root.
"""
import numpy as np
fvecs = fvecs.astype(np.float32)
norms = np.sum(fvecs, axis=1).reshape((1, len(fvecs)))
fvecs = np.sqrt(fvecs/norms.transpose())
return fvecs
def kmcombosmain(siftdir=''):
"""Generates commandlines for k-means combinations"""
from math import ceil
def num2str(k):
"""Converts a k value into a k-string"""
units = [(1000000, 'm'), (1000, 'k')]
for val, unit in units:
if k >= val:
return '%d%s' % (k//val, unit)
return str(k)
print '#!/bin/bash\n'
for k in [10000, 100000, 500000, 1000000]:
for npickles in [1, 10, 25, 50]:
for ss in [100000, 500000]:
if npickles*ss/1.1 < k: continue # skip combos with nsamples ~ k
for rootsift in [1, 0]:
kstr = num2str(k)
mem = ss*128*4*npickles*8
memgb = mem/1024.0/1024/1024
if memgb > 60: continue # skip combinations which are very high memory
memgbstr = '%0.2f GB' % (memgb)
outname = 'siftvocab-%(kstr)s-%(npickles)s-%(ss)s-%(rootsift)s.pickle' % (locals())
exe = 'python'
memneeded = int(ceil(memgb))
exe = 'qsub -N %(outname)s -cwd -o outs/ -e errs/ -l mem_free=%(memneeded)sG ~/pysub.sh' % (locals())
cmdline = '%(exe)s ~/pylib/sift.py makevocab ~/db/siftvecs/siftvecs-%(npickles)s.lst %(outname)s %(k)s %(ss)s %(rootsift)s' % (locals())
print cmdline
def makevocab(fnames, outfname, k=10000, ss=0, rootsift=0, n_jobs=4, max_iter=100, normalkmeans=0, **kw):
"""Runs kmeans using the list of filenames given, and given k.
Parameters:
k - number of clusters
ss - maximum number of descriptors to take from each input file
rootsift - if 1, then computes centers on RootSift descriptors
this is done by L1-norming the vector, then taking the sqrt of each element.
n_jobs - number of simultaneous cpus to use (only for normal k-means)
max_iter - number of iterations to run within k-means
normalkmeans - if true, uses normal k-means; else uses mini-batch k-means
All other kw are passed to the KMeans initializer.
batch_size is an important parameter for minibatch k-means
Vocab created April 24, 2013:
python sift.py kmeans <(shuf -n 5000 allsift.txt) siftvocab-10k-5000-500.pickle 10000 500
Vocabs created May 7, 2013:
Combinations (using sift fvec pickles, created from 1000 files * 500 vecs each):
K = 10k,100k,500k,1m
ss = 0 (use entire pickle), 100000 (~100 fvecs per image)
npickles = 1, 10, 25, 50
rootsift = 0, 1
Fname fmt:
siftvocab-%(K)-%(npickles)-%(ss)-%(rootsift).pickle
Cmd line usage:
python sift.py kmeans ~/db/picasa/shufsift-%(nfiles).txt) %(outfile) %(K) %(ss) %(rootsift)
Rough guide to memory usage: x million descriptors ~ x gigs of ram
"""
from sklearn.cluster import KMeans, MiniBatchKMeans
import cPickle as pickle
import numpy as np
from nkutils import specializeDict
m = []
rootsift = int(rootsift)
t1 = time.time()
for i, f, locs, fvecs in readfvecs(fnames, ss=ss):
if rootsift:
fvecs = applyrootsift(fvecs)
m.append(np.array(fvecs))
t2 = time.time()
m = np.vstack(m)
t3 = time.time()
log('Got final matrix of shape %s, dtype %s in %0.3fs' % (m.shape, m.dtype, t3-t1))
# now run kmeans
if normalkmeans:
# normal k-means
defkw = dict(precompute_distances=0, verbose=3, n_jobs=n_jobs, max_iter=max_iter, copy_x=0)
else:
# mini-batch
defkw = dict(compute_labels=0, verbose=3, n_jobs=n_jobs, max_iter=max_iter)
defkw['batch_size'] = max(1000, int(0.002*len(m)))
defkw.update(kw)
defkw = specializeDict(defkw)
if not normalkmeans:
try:
del defkw['n_jobs']
except Exception: pass
log('Running kmeans with k %s and kw %s' % (k, defkw))
kmfunc = KMeans if normalkmeans else MiniBatchKMeans
km = kmfunc(int(k), **defkw)
km.fit(m)
km.rootsift = rootsift
t4 = time.time()
log('KMeans finished, taking %0.3fs (%0.3fs total)' % (t4-t3, t4-t1))
pickle.dump(km, open(outfname, 'wb'), -1)
t5 = time.time()
log('Dumped KMeans to file %s. Total time %0.3fs' % (outfname, t5-t1))
makevocabmain = makevocab
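# Hedged standalone sketch (not in the original file) of the clustering step that
# makevocab() performs, with the file loading stripped away: fit a MiniBatchKMeans
# visual vocabulary directly on an in-memory descriptor matrix. The descriptor
# matrix here is random fake data.
def _minibatch_vocab_example(k=100):
    import numpy as np
    from sklearn.cluster import MiniBatchKMeans
    m = np.random.randint(0, 256, size=(5000, 128)).astype(np.uint8)  # fake SIFT descriptors
    km = MiniBatchKMeans(int(k), compute_labels=0, max_iter=100,
                         batch_size=max(1000, int(0.002*len(m))))
    km.fit(m)
    return km.cluster_centers_.shape  # (k, 128)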
def project(model, fnames, **kw):
"""Projects sift descriptors in 'fnames' using the 'model'.
The model can either be the model itself, or a pickled filename.
KW args are passed onto readfvecs.
If the model contains a field 'rootsift' which is true, then
applies the rootsift operator to all feature vectors.
Yields (i, fname, locs, projected values)
"""
from sklearn.cluster import KMeans, MiniBatchKMeans
import cPickle as pickle
if isinstance(model, basestring):
model = pickle.load(open(model))
rootsift = ('rootsift' in dir(model) and model.rootsift)
transfunc = applyrootsift if rootsift else lambda fvecs: fvecs
incr = 1000
for i, f, locs, fvecs in readfvecs(fnames, **kw):
log(' In main loop with %s, %s, %d' % (i, f, len(fvecs)))
try:
# generate outputs incrementally (to avoid soaking up all memory)
y = []
t1 = time.time()
for start in range(0, len(fvecs), incr):
if start >= len(fvecs): break
# get the current batch of feature vectors, applying any transformations (such as rootsift)
curfvecs = transfunc(fvecs[start:start+incr])
cur = model.predict(curfvecs)
y.extend(cur)
log(' Projected %d vecs from %d, %d total pts so far in %0.3fs' % (len(cur), start, len(y), time.time()-t1))
del fvecs
yield (i, f, locs, y)
except Exception, e:
log(' Error %s on %d, %s (%s): %s' % (type(e), i, f, type(fvecs), e))
def projectindiv(model, fnames, checkexist=1, compress=5, minscale=0, cachedir=None, returnfvecs=0, printout=1, **kw):
"""Projects sift descriptors in 'fnames' using the model from 'model'.
Writes individual output files with extension '.projected', including pos/scale/orientation.
    If printout is true, prints the outfname for each processed input (not all inputs).
If checkexist is true, skips existing outfiles.
If compress is >0, then compresses using gzip with given compression level.
In this case, outfilenames have '.gz' appended to them.
Else, no compression is applied.
If returnfvecs is true, then returns list of (i, f, locs, projectedvals).
Else, returns a list of all output names.
"""
from nkutils import getListAsStr
from gzip import GzipFile
from itertools import izip
import numpy as np
todo = []
ret = []
for fname in fnames:
outdir = cachedir if cachedir else os.path.dirname(fname)
outfname = os.path.join(outdir, os.path.basename(fname).rsplit('.sift', 1)[0]+'.projected')
if compress > 0:
outfname += '.gz'
try:
os.makedirs(os.path.dirname(outfname))
except OSError: pass
if checkexist and existsnonzero(outfname):
log(' Outfile %s already existed...' % (outfname))
if returnfvecs:
ret.append(list(readfvecs([outfname]))[0])
else:
ret.append(outfname)
continue
todo.append((fname, outfname))
if not todo: return ret
fnames, outfnames = zip(*todo)
for (i, f, locs, y), outfname in izip(project(model, fnames, **kw), outfnames):
outf = GzipFile(outfname, 'wb', compresslevel=compress) if compress > 0 else open(outfname, 'wb')
if locs:
for loc, val in zip(locs, y):
xloc, yloc, scale, ori = loc
if scale < minscale: continue
print >>outf, '%s\t%s' % (getListAsStr(loc, sep='\t'), val)
outf.close()
if printout:
print outfname
sys.stdout.flush()
if returnfvecs:
# convert y into a 2d numpy array, which is what we expect to use
if isinstance(y, list):
y = np.array(y, dtype=np.uint32).reshape((len(y), 1))
ret.append((i, f, locs, y))
else:
ret.append(outfname)
return ret
def projectindivmain(model, minsize, *fnames):
"""Driver for projectindiv().
    This calls projectindiv(model, fnames, checkexist=1, compress=3, minsize=minsize)
Writes individual output files with extension '.projected.gz', including pos/scale/orientation.
"""
minsize = float(minsize)
return projectindiv(model, fnames, checkexist=1, compress=3, minsize=minsize)
def projectallmain(model, *fnames):
"""Projects sift descriptors in 'fnames' using the model from 'model'.
Prints <input filename> <list of cluster ids>, all separated by tabs, to stdout.
Note that outputs are not guaranteed to be in same order as inputs, if you use multithreading.
"""
from nkutils import getListAsStr
for i, fname, locs, y in project(model, fnames):
print '%s\t%s' % (fname, getListAsStr(y, sep='\t'))
sys.stdout.flush()
def evalvocabmain(model, outfname, impairs, cachedir='./', maxk=50):
"""Evaluates a vocabulary on a set of image pairs.
Steps:
- extracts sift from each pair of images
- computes matches between each pair
- projects each sift point using the given vocab
- computes matches using projected points
- computes score: # common matches/# original matches
- outputs scores for each pair as [score, image 1, image 2], tab-separated to 'outfname'
Parameters:
model - either the model itself, or the pickle filename
outfname - filename where the output scores are written to
impairs - either a filename which lists image pairs (tab-separated, one pair per line)
or a list of fname pairs directly
cachedir - optionally, gives the directory to store projected sift vectors to.
defaults to current directory
"""
import cPickle as pickle
maxk = int(maxk)
base = lambda f: os.path.basename(f).rsplit('.',1)[0]
if isinstance(model, basestring):
model = pickle.load(open(model))
# get list of image pairs
if isinstance(impairs, basestring):
impairs = [l.rstrip('\n').split('\t')[:2] for l in open(impairs)]
# operate on each pair
outf = open(outfname, 'wb')
for im1, im2 in impairs:
# extract sift
sift1, sift2 = list(extract([im1,im2]))
# compute matches and matchpairs
matchfname = '%s-%s-matches.txt' % (base(im1), base(im2))
matches = match(sift1, sift2, matchfname)
matchpairs = set((i,j) for d, i, j, _, _ in matches)
# project
pf1, pf2 = projectindiv(model, [sift1,sift2], cachedir=cachedir, returnfvecs=1)
pf1, pf2 = pf1[2:4], pf2[2:4]
# match projections
pmatchfname = os.path.join(cachedir, matchfname.replace('-matches', '-pmatches'))
pmatches = match(pf1, pf2, pmatchfname, maxr=0, maxk=maxk)
pmatchpairs = set((i,j) for d, i, j, _, _ in pmatches)
common = matchpairs & pmatchpairs
score = len(common)/float(len(matchpairs))
log('Got %d raw matches, %d projected matches, %d common, for score %s' % (len(matchpairs), len(pmatchpairs), len(common), score))
print >>outf, '%s\t%s\t%s' % (score, im1, im2)
def scoreinvindex(invindex, pvec):
"""Scores a projected vector against an inverted index.
The invindex can either be the data structure loaded from disk, or a json filename.
It can also be compressed with gzip.
Yields sorted list of matches from the index ((i, score, imnum, imfname, invindex))
"""
from gzip import GzipFile
from collections import defaultdict
if isinstance(invindex, basestring):
f = GzipFile(invindex) if invindex.endswith('.gz') else open(invindex)
invindex = json.load(f)
imscores = defaultdict(float)
for c in pvec:
for imnum, s in invindex['words'][c]:
imscores[imnum] += s
matches = sorted(imscores.items(), key=lambda pair: pair[1], reverse=1)
for i, (imnum, score) in enumerate(matches):
yield ((i, score, imnum, invindex['ims'][imnum], invindex))
#print '%d\t%s\t%s' % (i+1, score, invindex['ims'][imnum])
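# Hedged usage sketch (not from the original code): build a tiny in-memory inverted
# index with the same structure scoreinvindex() expects and score a projected vector
# against it. The image names, word ids and weights below are invented.
def _scoreinvindex_example():
    toy_index = dict(
        k=3,
        ims=['a.jpg', 'b.jpg'],
        # words[c] is a list of (image number, weight) pairs for visual word c
        words=[[(0, 0.5)], [(0, 0.1), (1, 0.9)], []],
    )
    pvec = [0, 1, 1]  # projected cluster ids of a query image
    # returns matches sorted by score: b.jpg (1.8) before a.jpg (0.7)
    return list(scoreinvindex(toy_index, pvec))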
def makeindex(outfname, pfnames, k, tfidf=1, minscale=0, targetsize=0, minscore=0):
"""Makes an inverted index from the given list of projected individual outputs.
Parameters:
outfname: the json filename to write to
tfidf: if 1, then uses tfidf weighing
minscale: if > 0, then discards points at a scale smaller than that
targetsize: if > 0, then sets minscale=max(im.size)/targetsize
minscore: if the final score is less than this, we don't include it in the output
"""
import math
from collections import defaultdict
ret = dict(k=k, ims=[], words=[{} for i in range(k)])
if isinstance(pfnames, basestring):
pfnames = [l.rstrip('\n') for l in open(pfnames)]
t1 = time.time()
nimwords = []
wordcounts = defaultdict(int)
for i, pfname in enumerate(pfnames):
if i % 1 == 0:
log(' On cvec %d, pfname %s, %0.3fs elapsed' % (i+1, pfname, time.time()-t1))
fvecs = SiftFeat.siftFromFile(pfname, fmt='.projected.gz .projected'.split())
if targetsize > 0:
# get the maximum size
maxsize = 0
for f in fvecs:
maxsize = max(maxsize, f.x, f.y)
minscale = 0 if maxsize < targetsize else maxsize/float(targetsize)
#print pfname, maxsize, minscale, [(f.data[0], f.scale) for f in fvecs[:5]]
# get the list of points to use in the index based on minscale
cvec = [f.data[0] for f in fvecs if f.scale >= minscale]
log(' Went from %d features down to %d, due to targetsize %s, maxsize %s, and minscale %s' % (len(fvecs), len(cvec), targetsize, maxsize, minscale))
nimwords.append(len(cvec))
imnum = len(ret['ims'])
ret['ims'].append(pfname)
curwords = set()
for c in cvec:
curwords.add(c)
d = ret['words'][c]
d.setdefault(imnum, 0.0)
d[imnum] += 1.0
# accumulate curcounts into wordcounts
for word in curwords:
wordcounts[word] += 1
#if i > 1: break
# convert word dicts to lists, also applying tf-idf if wanted
ret['nims'] = len(ret['ims'])
log('Accumulating results')
for i, wd in enumerate(ret['words']):
lst = sorted(wd.items())
if tfidf:
newlst = []
for imgnum, nid in lst:
N = ret['nims']
ni = float(wordcounts[i])
l = math.log(N/ni)
nd = float(nimwords[imgnum])
score = l*nid/nd
#log(' Got nid %s, nd %s for tf %s, N %s, ni %s, for idf %s, log %s, score %s' % (nid, nd, nid/nd, N, ni, N/ni, l, score))
newlst.append((imgnum, score))
lst = newlst
# filter by minimum score
oldlen = len(lst)
lst = [(imgnum, score) for imgnum, score in lst if score > minscore]
if len(lst) != oldlen:
log(' For word %d, minscore %s, filtered %d items down to %d' % (i, minscore, oldlen, len(lst)))
ret['words'][i] = lst
log('Printing json output to %s' % (outfname))
json.dump(ret, open(outfname, 'wb'), sort_keys=1, indent=2)
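# Hedged worked example (not in the original file) of the tf-idf weight computed
# above: with N images in total, a word occurring in ni of them, and an image whose
# nd projected points hit this word nid times, the stored weight is log(N/ni)*nid/nd.
def _tfidf_weight_example(N=100, ni=4, nid=3, nd=250):
    import math
    return math.log(N/float(ni)) * nid / float(nd)  # ~0.0386 for the defaults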
def makeindexmain(outfname, pfnames, k=10000, targetsize=200, minscore=1e-7):
"""Makes inverted sift index."""
makeindex(outfname, pfnames, k=int(k), tfidf=1, minscale=0, targetsize=int(targetsize), minscore=float(minscore))
def verifyransac(f1, f2, minerror=0.5, maxiters=5000, maxk=10):
"""Runs ransac on the given set of matches.
Returns dict of best model
The best model dict has fields:
inliers: list of inliers, as simple indices
model: the best HomographyModel
error: the error (measured as percentage of inliers)
finalerror: the final error (as reprojection error on only the inliers)
"""
from nkransac import Ransac, HomographyModel
from nkutils import getTimeDiffs
minerror = float(minerror)
# get matches
times = [time.time()]
matches = match(f1, f2, outfname=None, maxratio=None, maxk=10, maxr=0)
times.append(time.time())
data = [((f1.x, f1.y), (f2.x, f2.y)) for d, i, j, f1, f2 in matches]
dataidx = dict((d, i) for i, d in enumerate(data))
times.append(time.time())
def callback(niters, elapsed, best, ransac, **kw):
"""Generic callback"""
perc = len(best['inliers'])/float(len(dataidx))
perc = min(perc, 0.8)
ransac.maxiters = min(maxiters, Ransac.estimateNiters(4, perc))
#print >>sys.stderr, 'On iter %d (%0.3fs elapsed), best model has %d inliers (%0.3f%%), error %0.2f, new maxiters %d' % (niters, elapsed, len(best['inliers']), perc, best['error'], ransac.maxiters)
r = Ransac(4, HomographyModel, callback=callback, strongthresh=10, weakthresh=30, mininliers=8, minerror=minerror)
times.append(time.time())
try:
ret = r.run(data)
except KeyboardInterrupt:
ret = r.best
times.append(time.time())
# also compute final error
if ret['model']:
ret['finalerror'] = ret['model'].geterror(ret['inliers'])
times.append(time.time())
log(' Got final times: %s' % (getTimeDiffs(times)))
#print 'Error (1-inlier perc): %s, reprojection error: %s, model:\n%s' % (ret['error'], ret['finalerror'], ret['model'])
return ret
def makescalesmain(targetsize, *fnames):
"""Prints scales for the given images"""
targetsize = float(targetsize)
print '#!/bin/bash\n'
for f in fnames:
im = Image.open(f)
m = min(im.size)
if m < targetsize: continue
scale = m/targetsize
print 'qsub -N project -cwd -o ~/outs/ -e ~/errs/ ~/pysub.sh ~/pylib/nksift.py projectindiv ~/projects/photorecall/curvocab.pickle %s %s' % (scale, f.replace('.jpg', '.sift.gz'))
#print '%s\t%s' % (f, m/targetsize)
def combineprojmain(projnamelst, outfname):
"""Combines multiple projections into a single pickle file"""
import cPickle as pickle
from gzip import GzipFile
fnames = [l.rstrip('\n') for l in open(projnamelst)]
obj = {}
for fname in fnames:
print fname
els = [l.strip().split() for l in GzipFile(fname)]
data = [(map(float, el[:4]), map(int, el[4:])) for el in els]
locs, fvecs = zip(*data)
if 0:
feats = SiftFeat.siftFromFile((locs, fvecs))
obj[fname] = feats
else:
obj[fname] = (locs, fvecs)
if outfname.endswith('.json'):
json.dump(obj, open(outfname, 'wb'), sort_keys=1, indent=2)
elif outfname.endswith('.pickle'):
pickle.dump(obj, open(outfname, 'wb'), -1)
if __name__ == "__main__":
tasks = 'draw match drawmatches ransac fullsift match1st extract projectindiv projectall makevocab evalvocab aggregate kmcombos makeindex makescales combineproj'.split()
if len(sys.argv) < 2:
print 'Usage: python %s <%s> <args> ...' % (sys.argv[0], '|'.join(tasks))
sys.exit()
task = sys.argv[1]
if task not in tasks:
print 'Invalid task "%s", should be one of: %s' % (task, ','.join(tasks))
sys.exit()
func = eval('%smain' % task)
func(*sys.argv[2:])
| bsd-3-clause |
ipower2/Sudoku-Solver | sudokusolver/plotutilities.py | 1 | 7907 | """
This module contains the functions needed to graphically display
the solution to a Sudoku puzzle. It depends on the matplotlib
library. The graphical output shows which cell was evaluated in each
iteration and the time taken to solve, and shades the cells by difficulty.
"""
__author__ = 'krishnakumarramamoorthy'
import time
import matplotlib.pyplot as plt
import matplotlib.patches as patch
import numpy as np
def visualize_solution(input_matrix, solved_matrix, graph, zero_indices, ts, out_path, time_to_solution):
"""
    Plotting function that takes the Sudoku solution and intermediate data
    and plots them. It creates three axes: one for plotting the progress,
    one for printing the summary metrics, and one for the solved Sudoku.
"""
# prepare figure window for plotting
plt.figure().patch.set_facecolor('white')
plt.suptitle('Sudoku Solver\n', fontsize=20, color=plt.cm.Blues(0.9))
# plot the performance: number of iterations vs. numbers filled
ax1 = plt.subplot2grid((20, 3), (1, 0), rowspan=17, colspan=2)
[x, y, ylabels, eval_histogram, n] = generate_progress_data(graph, zero_indices)
max_eval_per_cell = max(eval_histogram)
total_iterations = n
plot_decorate_performance_data(ax1, x, y, ylabels)
# work on sudoku box area
ax2 = plt.subplot2grid((20, 3), (10, 2), rowspan=10, colspan=1)
create_colorbar(ax2, max_eval_per_cell)
decorate_sudoku_box(ax2)
fill_numbers(ax2, input_matrix, solved_matrix)
shade_cell_by_difficulty(ax2, zero_indices, eval_histogram, max_eval_per_cell)
# work on statistics area
ax3 = plt.subplot2grid((20, 3), (1, 2), rowspan=7, colspan=1)
time_to_plot = time.time() - ts
write_statistics(ax3, time_to_solution, time_to_plot, total_iterations)
# save figure and show
plt.savefig(out_path)
plt.show()
def generate_progress_data(graph, zero_indices):
"""
Generate data for plotting the number of evaluations and cell being evaluated
"""
counter = 0
n = 0
eval_histogram = [0] * len(zero_indices[0])
ylabels = []
x = []
y = []
for n in range(len(graph['index'])):
y.append(graph['index'][n][1])
eval_histogram[graph['index'][n][1]] += 1
ylabels.append('[{},{}]'.format(zero_indices[0][graph['index'][n][1]] + 1,
zero_indices[1][graph['index'][n][1]] + 1))
x.append(counter)
counter += 1
return [x, y, ylabels, eval_histogram, n]
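# Hedged toy example (not part of the original module) of the inputs that
# generate_progress_data() expects: 'graph' records which unfilled cell was touched
# at each solver step, and 'zero_indices' holds the row and column arrays of the
# unfilled cells. All values below are invented.
def _progress_data_example():
    graph = {'index': [(0, 0), (1, 1), (2, 0), (3, 1)]}
    zero_indices = ([0, 4], [2, 7])  # rows and columns of two unfilled cells
    return generate_progress_data(graph, zero_indices)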
def plot_decorate_performance_data(ax1, x, y, ylabels):
"""
Modify the objects in performance plot. Add labels, resize font, etc.
"""
ax1.plot(x, y)
ax1.set_title('Progress', fontsize=11, color='black', alpha=0.9)
plt.xlabel('How long did it take to solve? \n (Number of Evaluations)', fontsize=10)
plt.ylabel('Which cell was evaluated? (Unfilled Cell Index)', fontsize=10)
# label the y-axis with the cell location in sudoku grid
plt.yticks(y, ylabels, color='gray', fontsize=8)
plt.xticks(color='gray')
def create_colorbar(ax2, max_eval_per_cell):
"""
Create a color index for the colors used to show difficulty of cells in sudoku grid
"""
# create a temporary imshow and use it to create a colorbar, then remove the imshow object
    data = np.random.rand(10, 10) * max_eval_per_cell  # dummy data spanning the color range
cax = plt.imshow(data, interpolation='nearest', cmap=plt.cm.Blues)
cbar = plt.colorbar(cax, orientation='horizontal', ticks=[])
cbar.solids.set_edgecolor(None)
cbar.ax.set_xlabel('How difficult was it to solve?\n(Easy - - - > Difficult)', fontsize=10)
# clear dummy imshow plot; anything plotted before in ax2 won't persist. cbar is on its own axes
ax2.cla()
def decorate_sudoku_box(ax2):
"""
Modify line and color properties of sudoku grid to make it look good
"""
[x, y, xr, yr] = generate_sudoku_box_lines()
ax2.set_title('Solution\n\n', fontsize=11, color='black', alpha=0.9)
ax2.plot(x, y, color='gray')
ax2.plot(xr, yr, color='gray', linewidth=2)
# move the x tick to top
ax2.xaxis.tick_top()
# turn off the tick markers
ax2.tick_params(bottom='off', top='off', left='off', right='off')
# rewrite the tick labels so that they are aligned at the center of each box and not at the border
plt.xticks(np.arange(0.5, 9.5, 1), np.arange(1, 10, 1), color='gray', fontsize=8)
# start numbering y labels from the top
plt.yticks(np.arange(0.5, 9.5, 1), np.arange(1, 10, 1), color='gray', fontsize=8)
def fill_numbers(ax2, input_matrix, solved_matrix):
# fill the numbers
for i in range(9):
for j in range(9):
# quirk: when plotting matrix, transpose it; i is in y-axis and j is in x-axis
if input_matrix[i][j] == 0:
ax2.text(j + 0.5, i + 0.5, solved_matrix[i][j], horizontalalignment='center',
verticalalignment='center', color='black', fontsize=10)
else:
ax2.text(j + 0.5, i + 0.5, solved_matrix[i][j], horizontalalignment='center',
verticalalignment='center', color='gray', alpha=0.7, fontsize=10)
def shade_cell_by_difficulty(ax2, zero_indices, eval_histogram, max_eval_per_cell):
"""
Shade the unfilled cells in sudoku box with a color representing
difficulty. Difficulty is defined as number of times that cell
was evaluated.
"""
# fill the background with difficulty metric
for c in range(len(zero_indices[0])):
i = zero_indices[0][c]
j = zero_indices[1][c]
# quirk: when plotting matrix, transpose it; i is in y-axis and j is in x-axis
ax2.add_patch(patch.Rectangle((j, i), 1, 1, facecolor=plt.cm.Blues(eval_histogram[c] * 1.0 / max_eval_per_cell),
alpha=0.5))
def write_statistics(ax3, time_to_solution, time_to_plot, total_iterations):
ax3.get_xaxis().set_visible(False)
ax3.get_yaxis().set_visible(False)
ax3.spines['left'].set_color('white')
ax3.spines['right'].set_color('white')
ax3.spines['top'].set_color('white')
ax3.spines['bottom'].set_color('white')
ax3.text(0.5, 0.9, '{:.2f} secs\n'.format(time_to_solution),
horizontalalignment='center', verticalalignment='center', fontsize=16)
ax3.text(0.5, 0.85, 'Time to solution', horizontalalignment='center', verticalalignment='center',
fontsize=12, color='gray')
ax3.text(0.5, 0.6, '{:.2f} secs\n'.format(time_to_plot), horizontalalignment='center',
verticalalignment='center', fontsize=16)
ax3.text(0.5, 0.55, 'Time to plot', horizontalalignment='center', verticalalignment='center',
fontsize=12, color='gray')
ax3.text(0.5, 0.3, '{} \n'.format(total_iterations), horizontalalignment='center',
verticalalignment='center', fontsize=16)
ax3.text(0.5, 0.25, 'Number of evaluations', horizontalalignment='center', verticalalignment='center',
fontsize=12, color='gray')
def generate_sudoku_box_lines():
# lines for cells
x = []
y = []
# lines for regions
xr = []
yr = []
# data for vertical lines
for i in range(9):
x.append(i)
x.append(i)
y.append(0)
y.append(9)
x.append(None)
y.append(None)
if divmod(i, 3)[1] == 0:
xr.append(i)
xr.append(i)
yr.append(0)
yr.append(9)
xr.append(None)
yr.append(None)
# data for horizontal lines
for j in range(9):
x.append(0)
x.append(9)
y.append(j)
y.append(j)
x.append(None)
y.append(None)
if divmod(j, 3)[1] == 0:
xr.append(0)
xr.append(9)
yr.append(j)
yr.append(j)
xr.append(None)
yr.append(None)
return [x, y, xr, yr]
| bsd-3-clause |
DreamLiMu/ML_Python | tools/Ch03/treePlotter.py | 3 | 3824 | '''
Created on Oct 14, 2010
@author: Peter Harrington
'''
import matplotlib.pyplot as plt
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def getNumLeafs(myTree):
numLeafs = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictionaries, if not they are leaf nodes
numLeafs += getNumLeafs(secondDict[key])
else: numLeafs +=1
return numLeafs
def getTreeDepth(myTree):
maxDepth = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictionaries, if not they are leaf nodes
thisDepth = 1 + getTreeDepth(secondDict[key])
else: thisDepth = 1
if thisDepth > maxDepth: maxDepth = thisDepth
return maxDepth
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
xytext=centerPt, textcoords='axes fraction',
va="center", ha="center", bbox=nodeType, arrowprops=arrow_args )
def plotMidText(cntrPt, parentPt, txtString):
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)
def plotTree(myTree, parentPt, nodeTxt):#if the first key tells you what feat was split on
numLeafs = getNumLeafs(myTree) #this determines the x width of this tree
depth = getTreeDepth(myTree)
firstStr = myTree.keys()[0] #the text label for this node should be this
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
plotMidText(cntrPt, parentPt, nodeTxt)
plotNode(firstStr, cntrPt, parentPt, decisionNode)
secondDict = myTree[firstStr]
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictionaries, if not they are leaf nodes
plotTree(secondDict[key],cntrPt,str(key)) #recursion
else: #it's a leaf node print the leaf node
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
#if you do get a dictonary you know it's a tree, and the first element will be another dict
def createPlot(inTree):
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
createPlot.ax1 = plt.subplot(111, frameon=False, **axprops) #no ticks
    #createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo purposes
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW; plotTree.yOff = 1.0;
plotTree(inTree, (0.5,1.0), '')
plt.show()
#def createPlot():
# fig = plt.figure(1, facecolor='white')
# fig.clf()
#    createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo purposes
# plotNode('a decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)
# plotNode('a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)
# plt.show()
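# Hedged usage sketch (not in the original file): draw a small hard-coded decision
# tree with createPlot(). The tree literal mirrors the first entry returned by
# retrieveTree() defined below.
def demoPlot():
    demoTree = {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    createPlot(demoTree)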
def retrieveTree(i):
listOfTrees =[{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
{'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
]
return listOfTrees[i]
#createPlot(thisTree) | gpl-2.0 |
avishek-r-kumar/dfitools | DFI_tools/aligndfi.py | 1 | 2257 |
def aligndfi(pdbID):
"""
    Align DFI. Given a pdbID, this function
    returns a DataFrame of DFI values mapped onto the aligned sequence.
"""
import dfi
import dfi.fastaseq
import dfi.fasta_convert
from clustalo import clustalo
import numpy as np
import pandas as pd
remove_header = lambda x:''.join([s for s in x.split('\n') if not(s.startswith('>'))])
seqfasta=dfi.fastaseq.get_fastaseq(pdbID)
#pdbseq=dfi.fasta_convert.fafsa_format(pdbID+'.pdb')
pdbseq=dfi.fastaseq.get_fastaseq('P04062')
pdb=remove_header(pdbseq)
fasta=remove_header(seqfasta)
input = {'pdbseq':pdb,'fastaseq':fasta}
aligned = clustalo(input,seqtype=3)
df_dfi = dfi.calc_dfi(pdbID+'.pdb')
df_dfi['pctfdfi'] = len(df_dfi)*np.nan
pctdfi = df_dfi['pctdfi']
chainIDs = df_dfi['ChainID']
ResIDs = df_dfi['ResI']
pctfdfi = df_dfi['pctfdfi']
df_aligned = pd.DataFrame()
df_aligned['fastaseq'] = [s for s in aligned['fastaseq']]
df_aligned['pdbseq'] = [s for s in aligned['pdbseq']]
i = 0
align_pctdfi = []
align_chain = []
align_resi = []
align_pctfdfi = []
for site in df_aligned['pdbseq']:
if i >= len(pctdfi):
break
print 'site',site
if site == '-':
print np.nan
align_pctdfi.append(np.nan)
align_chain.append(np.nan)
align_resi.append(np.nan)
align_pctfdfi.append(np.nan)
else:
print pctdfi[i]
align_pctdfi.append(pctdfi[i])
align_chain.append(chainIDs[i])
align_resi.append(int(ResIDs[i]))
align_pctfdfi.append(pctfdfi[i])
i += 1
while len(align_pctdfi) < len(df_aligned['pdbseq']):
print np.nan
align_pctdfi.append(np.nan)
align_chain.append(np.nan)
align_resi.append(np.nan)
align_pctfdfi.append(np.nan)
print len(align_pctdfi), len(align_chain), len(align_resi)
df_aligned['pctdfi'] = align_pctdfi
df_aligned['ChainID'] = align_chain
df_aligned['ResI'] = align_resi
df_aligned['ind'] = range(1,len(align_pctdfi)+1)
df_aligned['pctfdfi'] = align_pctfdfi
    df_aligned.to_csv(pdbID+'-dfialigned.csv')
    return df_aligned
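# Hedged usage sketch (not part of the original module): run the alignment for a
# single structure. The PDB id below is only a placeholder; the call assumes that
# <pdbID>.pdb is present in the working directory and that the dfi and clustalo
# packages are importable.
if __name__ == '__main__':
    import sys
    pdb_id = sys.argv[1] if len(sys.argv) > 1 else '1OGS'  # '1OGS' is a placeholder id
    df = aligndfi(pdb_id)
    print df.head()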
| bsd-3-clause |
zhenv5/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
q1ang/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
shikhardb/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
Akshay0724/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 86 | 4092 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/model_selection/grid_search_digits.py | 8 | 2760 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.results_['test_mean_score']
stds = clf.results_['test_std_score']
for i in range(len(clf.results_['params'])):
print("%0.3f (+/-%0.03f) for %r"
% (means[i], stds[i] * 2, clf.results_['params'][i]))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
bnaul/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 16 | 5431 | import pytest
import numpy as np
from scipy import sparse
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils._testing import (assert_array_equal,
assert_allclose_dense_sparse)
from sklearn.utils._testing import assert_warns_message, assert_no_warnings
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X, 'transform should have returned X unchanged',
)
# The function should only have received X.
assert args_store == [X], ('Incorrect positional arguments passed to '
'func: {args}'.format(args=args_store))
assert not kwargs_store, ('Unexpected keyword arguments passed to '
'func: {args}'.format(args=kwargs_store))
# reset the argument stores.
args_store[:] = []
kwargs_store.clear()
transformed = FunctionTransformer(
_make_func(args_store, kwargs_store),
).transform(X)
assert_array_equal(transformed, X,
err_msg='transform should have returned X unchanged')
# The function should have received X
assert args_store == [X], ('Incorrect positional arguments passed '
'to func: {args}'.format(args=args_store))
assert not kwargs_store, ('Unexpected keyword arguments passed to '
'func: {args}'.format(args=kwargs_store))
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
def test_kw_arg():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
# Test that rounding is correct
assert_array_equal(F.transform(X),
np.around(X, decimals=3))
def test_kw_arg_update():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args['decimals'] = 1
# Test that rounding is correct
assert_array_equal(F.transform(X), np.around(X, decimals=1))
def test_kw_arg_reset():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args = dict(decimals=1)
# Test that rounding is correct
assert_array_equal(F.transform(X), np.around(X, decimals=1))
def test_inverse_transform():
X = np.array([1, 4, 9, 16]).reshape((2, 2))
# Test that inverse_transform works correctly
F = FunctionTransformer(
func=np.sqrt,
inverse_func=np.around, inv_kw_args=dict(decimals=3),
)
assert_array_equal(
F.inverse_transform(F.transform(X)),
np.around(np.sqrt(X), decimals=3),
)
def test_check_inverse():
X_dense = np.array([1, 4, 9, 16], dtype=np.float64).reshape((2, 2))
X_list = [X_dense,
sparse.csr_matrix(X_dense),
sparse.csc_matrix(X_dense)]
for X in X_list:
if sparse.issparse(X):
accept_sparse = True
else:
accept_sparse = False
trans = FunctionTransformer(func=np.sqrt,
inverse_func=np.around,
accept_sparse=accept_sparse,
check_inverse=True,
validate=True)
assert_warns_message(UserWarning,
"The provided functions are not strictly"
" inverse of each other. If you are sure you"
" want to proceed regardless, set"
" 'check_inverse=False'.",
trans.fit, X)
trans = FunctionTransformer(func=np.expm1,
inverse_func=np.log1p,
accept_sparse=accept_sparse,
check_inverse=True,
validate=True)
Xt = assert_no_warnings(trans.fit_transform, X)
assert_allclose_dense_sparse(X, trans.inverse_transform(Xt))
# check that we don't check inverse when one of the func or inverse is not
# provided.
trans = FunctionTransformer(func=np.expm1, inverse_func=None,
check_inverse=True, validate=True)
assert_no_warnings(trans.fit, X_dense)
trans = FunctionTransformer(func=None, inverse_func=np.expm1,
check_inverse=True, validate=True)
assert_no_warnings(trans.fit, X_dense)
def test_function_transformer_frame():
pd = pytest.importorskip('pandas')
X_df = pd.DataFrame(np.random.randn(100, 10))
transformer = FunctionTransformer()
X_df_trans = transformer.fit_transform(X_df)
assert hasattr(X_df_trans, 'loc')
| bsd-3-clause |
vshtanko/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
sebalander/VisionUNQ | dev/bootstrap.py | 1 | 4340 | # -*- coding: utf-8 -*-
"""
Script to obtain error bars for the calibration parameters.
It is known that when projecting from VCA to Map there is an error in the
x, y coordinates with a Gaussian distribution whose standard deviation in
pixels is s_N-1 = 3.05250940223. The idea is to randomly generate 42
training points located in VCA, transform them with the optimal
parameters and add the error; this yields the Map training points.
For each training run the fitted parameters are stored so that statistics
can be computed afterwards.
@author: sebalander
"""
#%% IMPORTS
import numpy as np
from lmfit import minimize, Parameters
import pickle
from copy import deepcopy as copiar
import matplotlib.pyplot as plt
import sys
sys.path.append('../modules') # where modules are
from VCA2Earth_RelErr import VCA2Earth_RelErr
from forwardVCA2Map import forwardVCA2Map
if __name__ == '__main__':
#%% LOAD DATA
# load mu and covariance matrix
data=np.loadtxt('comparoSalidas.txt').T
    # let's look at the correlation between the x and y errors
erx=data[4]-data[0]
ery=data[5]-data[1]
    # to compute the elliptical Gaussian
    X=np.array([erx,ery]) # list of values
    mu=np.array(np.matrix(np.mean(X,1)))[0] # mean value
    SS=np.cov(X) # covariance matrix of the x,y errors
# load optimized parameters (best fit)
prm=Parameters()
with open('parametros.pkl', 'rb') as f:
prm = pickle.load(f)
    # make sure all six parameters are varied at once
prm['Px'].vary = prm['Py'].vary = prm['Pz'].vary=True
prm['gamma'].vary = prm['beta'].vary = prm['alfa'].vary=True
#%% PARAMETERS
    centro=1920/2 # position of the image center
    Rmax=873 # maximum distance the points may have from the image center
    s=3.05250940223 # standard deviation of the noise Gaussian
    N=10000# the 'experiment' is repeated N times
    M=42 # number of point pairs to use
    XX=np.empty((4,M))
    PAR=np.empty((N,6)) # where the parameter values are stored
    print(N,'experiments with',M,'points.')
#%% LOOP
for i in range(N):
        print(' experiment', i)
        t = np.random.random_sample(M)# auxiliary variable from which to draw the radii
        r = Rmax*np.sqrt(t)# generated radii
        fi = 2*np.pi*np.random.random_sample(M)# generated angles
        # pixel discretization is not taken into account
        # these points could just as well come from interest-point detection
        XX[0] = r*np.cos(fi)+centro # x positions
        XX[1] = r*np.sin(fi)+centro # y positions
        # the points are now in the VCA image; transform them
        # and add the noise
        XX[2],XX[3] = forwardVCA2Map(prm,XX[0],XX[1]) # with the optimal parameters
        # ruido=np.random.normal(0.0,s,(2,M))
        # noise from the bivariate Gaussian
        ruido = np.random.multivariate_normal(mu,SS,M)
        XX[2:] = XX[2:]+ruido.T
        # the points are now ready for training
        prm_aux = copiar(prm) # make a copy whose parameters can be modified
        res = minimize(VCA2Earth_RelErr,prm_aux,args=([XX]))# optimization
        # store the parameters
PAR[i,:] = np.array([
prm_aux['Px'].value,
prm_aux['Py'].value,
prm_aux['Pz'].value,
prm_aux['gamma'].value,
prm_aux['beta'].value,
prm_aux['alfa'].value ])
#%% PRINT PARAMETERS
    print('means and standard deviations, in this order:')
    print('px, py, pz, gamma, beta, alfa')
for i in range(6):
print( ' =',np.mean(PAR[:,i]),'\pm',np.std(PAR[:,i],ddof=1),'\\')
"""
    the following output is obtained
    means and standard deviations, in this order:
px,py,pz,gamma, beta, alfa
= 297.404047304 \pm 4.43071704293 \
= 272.780351563 \pm 7.35354637011 \
= 29.7903688186 \pm 1.57777006693 \
= 3.17515588912 \pm 0.0110500500912 \
= -0.0237482769487 \pm 0.0131952778028 \
= 1.68907876337 \pm 0.0464673979487 \
"""
#%% PRINT PLOT
fig, axis = plt.subplots(nrows=2,ncols=3);
binposition=50
for i in range(3):
axis[0,i].hist(PAR[:,i],binposition,normed=True,histtype='step')
binsangle=50
for i in range(3):
axis[1,i].hist(PAR[:,i+3],binposition,normed=True,histtype='step')
plt.savefig('boots.eps')
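    #%% ILLUSTRATIVE SKETCH (added; not part of the original script)
    # Once PAR is filled, percentile bootstrap confidence intervals could be
    # derived as below. The 95% level and the use of np.percentile are
    # assumptions; the block is commented out so the script's behaviour is
    # unchanged.
    # lo, hi = np.percentile(PAR, [2.5, 97.5], axis=0)
    # for name, l, h in zip(['Px', 'Py', 'Pz', 'gamma', 'beta', 'alfa'], lo, hi):
    #     print(name, 'in [', l, ',', h, ']  (95% bootstrap interval)')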
| bsd-3-clause |
simonsfoundation/CaImAn | demos/obsolete/1_1/demo_pipeline_cnmfE_1_1.py | 2 | 11045 | #!/usr/bin/env python
"""
Complete demo pipeline for motion correction, source extraction, and deconvolution
of one photon microendoscopic calcium imaging data using the CaImAn package.
Demo is also available as a jupyter notebook (see demo_pipeline_cnmfE.ipynb)
"""
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
try:
if __IPYTHON__:
print('Detected iPython')
# this is used for debugging purposes only. allows to reload classes when changed
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
except NameError:
pass
import caiman as cm
from caiman.source_extraction import cnmf
from caiman.utils.utils import download_demo
from caiman.utils.visualization import inspect_correlation_pnr
from caiman.motion_correction import motion_correct_oneP_rigid, motion_correct_oneP_nonrigid
from caiman.source_extraction.cnmf import params as params
from copy import deepcopy
#%%
def main():
pass # For compatibility between running under Spyder and the CLI
#%% First setup some parameters
# dataset dependent parameters
display_images = False # Set to true to show movies and images
fnames = ['data_endoscope.tif'] # filename to be processed
fr = 10 # movie frame rate
decay_time = 0.4 # length of a typical transient in seconds
# motion correction parameters
do_motion_correction_nonrigid = False
do_motion_correction_rigid = True # choose motion correction type
gSig_filt = (3, 3) # size of filter, in general gSig (see below),
# change this one if algorithm does not work
max_shifts = (5, 5) # maximum allowed rigid shift
strides = (48, 48) # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (size of patch strides+overlaps)
    # for parallelization split the movies in num_splits chunks across time
# (make sure that length_movie/num_splits_to_process_rig>100)
splits_rig = 10
splits_els = 10
upsample_factor_grid = 4 # upsample factor to avoid smearing when merging patches
# maximum deviation allowed for patch with respect to rigid shifts
max_deviation_rigid = 3
#%% start the cluster
try:
cm.stop_server() # stop it if it was running
except():
pass
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
n_processes=24, # number of process to use, if you go out of memory try to reduce this one
single_thread=False)
#%% download demo file
fnames = [download_demo(fnames[0])]
filename_reorder = fnames
#%% MOTION CORRECTION
if do_motion_correction_nonrigid or do_motion_correction_rigid:
# do motion correction rigid
mc = motion_correct_oneP_rigid(fnames,
gSig_filt=gSig_filt,
max_shifts=max_shifts,
dview=dview,
splits_rig=splits_rig,
save_movie=not(do_motion_correction_nonrigid),
border_nan='copy'
)
new_templ = mc.total_template_rig
plt.subplot(1, 2, 1); plt.imshow(new_templ) # % plot template
plt.subplot(1, 2, 2); plt.plot(mc.shifts_rig) # % plot rigid shifts
plt.legend(['x shifts', 'y shifts'])
plt.xlabel('frames'); plt.ylabel('pixels')
# borders to eliminate from movie because of motion correction
bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(np.int)
filename_reorder = mc.fname_tot_rig
# do motion correction nonrigid
if do_motion_correction_nonrigid:
mc = motion_correct_oneP_nonrigid(
fnames,
gSig_filt=gSig_filt,
max_shifts=max_shifts,
strides=strides,
overlaps=overlaps,
splits_els=splits_els,
upsample_factor_grid=upsample_factor_grid,
max_deviation_rigid=max_deviation_rigid,
dview=dview,
splits_rig=None,
save_movie=True, # whether to save movie in memory mapped format
new_templ=new_templ, # template to initialize motion correction
border_nan='copy'
)
filename_reorder = mc.fname_tot_els
bord_px = np.ceil(
np.maximum(np.max(np.abs(mc.x_shifts_els)),
np.max(np.abs(mc.y_shifts_els)))).astype(np.int)
# create memory mappable file in the right order on the hard drive (C order)
fname_new = cm.save_memmap(
filename_reorder,
base_name='memmap_',
order='C',
border_to_0=bord_px,
dview=dview)
# load memory mappable file
Yr, dims, T = cm.load_memmap(fname_new)
Y = Yr.T.reshape((T,) + dims, order='F')
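    # Orientation note (added sketch, not from the original demo): Yr is the
    # memory-mapped movie flattened to (n_pixels, T), so the reshape above
    # recovers the (T, d1, d2) stack, e.g. the following should hold:
    # assert Yr.shape == (np.prod(dims), T) and Y.shape == (T,) + dims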
#%% parameters for source extraction and deconvolution
p = 1 # order of the autoregressive system
K = None # upper bound on number of components per patch, in general None
gSig = (3, 3) # gaussian width of a 2D gaussian kernel, which approximates a neuron
gSiz = (13, 13) # average diameter of a neuron, in general 4*gSig+1
Ain = None # possibility to seed with predetermined binary masks
merge_thresh = .7 # merging threshold, max correlation allowed
rf = 40 # half-size of the patches in pixels. e.g., if rf=40, patches are 80x80
stride_cnmf = 20 # amount of overlap between the patches in pixels
    # (keep it at least as large as gSiz, i.e. 4 times the neuron size gSig)
tsub = 2 # downsampling factor in time for initialization,
# increase if you have memory problems
ssub = 1 # downsampling factor in space for initialization,
# increase if you have memory problems
# you can pass them here as boolean vectors
low_rank_background = None # None leaves background of each patch intact,
# True performs global low-rank approximation if gnb>0
gnb = 0 # number of background components (rank) if positive,
# else exact ring model with following settings
# gnb= 0: Return background as b and W
# gnb=-1: Return full rank background B
# gnb<-1: Don't return background
nb_patch = 0 # number of background components (rank) per patch if gnb>0,
# else it is set automatically
min_corr = .8 # min peak value from correlation image
    min_pnr = 10        # min peak to noise ratio from PNR image
ssub_B = 2 # additional downsampling factor in space for background
ring_size_factor = 1.4 # radius of ring is gSiz*ring_size_factor
# parameters for component evaluation
min_SNR = 3 # adaptive way to set threshold on the transient size
r_values_min = 0.85 # threshold on space consistency (if you lower more components
# will be accepted, potentially with worst quality)
opts = params.CNMFParams(dims=dims, fr=fr, decay_time=decay_time,
method_init='corr_pnr', # use this for 1 photon
k=K,
gSig=gSig,
gSiz=gSiz,
merge_thresh=merge_thresh,
p=p,
tsub=tsub,
ssub=ssub,
rf=rf,
stride=stride_cnmf,
only_init_patch=True, # set it to True to run CNMF-E
gnb=gnb,
nb_patch=nb_patch,
method_deconvolution='oasis', # could use 'cvxpy' alternatively
low_rank_background=low_rank_background,
                             update_background_components=True,  # sometimes setting this to False improves the results
min_corr=min_corr,
min_pnr=min_pnr,
normalize_init=False, # just leave as is
center_psf=True, # leave as is for 1 photon
ssub_B=ssub_B,
ring_size_factor=ring_size_factor,
del_duplicates=True, # whether to remove duplicates from initialization
border_pix=bord_px) # number of pixels to not consider in the borders)
#%% compute some summary images (correlation and peak to noise)
# change swap dim if output looks weird, it is a problem with tiffile
cn_filter, pnr = cm.summary_images.correlation_pnr(Y, gSig=gSig[0], swap_dim=False)
# inspect the summary images and set the parameters
inspect_correlation_pnr(cn_filter, pnr)
# print parameters set above, modify them if necessary based on summary images
print(min_corr) # min correlation of peak (from correlation image)
print(min_pnr) # min peak to noise ratio
#%% RUN CNMF ON PATCHES
cnm = cnmf.CNMF(n_processes=n_processes, dview=dview, Ain=Ain, params=opts)
cnm.fit(Y)
#%% DISCARD LOW QUALITY COMPONENTS
cnm.params.set('quality', {'min_SNR': min_SNR,
'rval_thr': r_values_min,
'use_cnn': False})
cnm.estimates.evaluate_components(Y, cnm.params, dview=dview)
print(' ***** ')
print('Number of total components: ', len(cnm.estimates.C))
print('Number of accepted components: ', len(cnm.estimates.idx_components))
#%% PLOT COMPONENTS
cnm.dims = dims
if display_images:
cnm.estimates.plot_contours(img=cn_filter, idx=cnm.estimates.idx_components)
cnm.estimates.view_components(Y, idx=cnm.estimates.idx_components)
#%% MOVIES
if display_images:
# fully reconstructed movie
cnm.estimates.play_movie(Y, q_max=99.9, magnification=2,
include_bck=True, gain_res=10, bpx=bord_px)
# movie without background
cnm.estimates.play_movie(Y, q_max=99.9, magnification=2,
include_bck=False, gain_res=4, bpx=bord_px)
#%% STOP SERVER
cm.stop_server(dview=dview)
# This is to mask the differences between running this demo in Spyder
# versus from the CLI
if __name__ == "__main__":
main()
| gpl-2.0 |
yanlend/scikit-learn | sklearn/neighbors/unsupervised.py | 117 | 4755 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
        Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
EtienneCmb/brainpipe | brainpipe/classification/_classification.py | 1 | 14820 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.base import clone
from sklearn.metrics import accuracy_score, confusion_matrix
from joblib import Parallel, delayed
from .utils._classif import *
from .utils._clfplt import clfplt
from ..statistics import (bino_da2p, bino_p2da, perm_2pvalue, permIntraClass)
from ..tools import groupInList, list2index, uorderlst, adaptsize
from itertools import product
import pandas as pd
__all__ = ['classify',
'defClf',
'defCv',
'defVoting',
'generalization'
]
class classify(_classification, clfplt):
"""Define a classification object and apply to classify data.
This class can be consider as a centralization of scikit-learn
tools, with a few more options.
To classify data, two objects are necessary :
- A classifier object (lda, svm, knn...)
- A cross-validation object which is used to validate a classification
performance.
This two objects can either be defined before the classify object with
defCv and defClf, or they can be directly defined inside the classify
class.
Args:
y: array
The vector label
Kwargs:
clf: int / string / classifier object, optional, [def: 0]
Define a classifier. If clf is an integer or a string, the
classifier will be defined inside classify. Otherwise, it is
            possible to define a classifier beforehand with defClf and pass it in clf.
cvtype: string / cross-validation object, optional, [def: 'skfold']
Define a cross-validation. If cvtype is a string, the
cross-validation will be defined inside classify. Otherwise, it is
            possible to define a cross-validation beforehand with defCv and pass it
in cvtype.
        clfArg: dictionary, optional, [def: {}]
            This dictionary can be used to define supplementary arguments for the
            classifier. See the documentation of defClf.
        cvArg: dictionary, optional, [def: {}]
            This dictionary can be used to define supplementary arguments for the
            cross-validation. See the documentation of defCv.
Example:
>>> # 1) Define a classifier and a cross-validation before classify():
>>> # Define a 50 times 5-folds cross-validation :
>>> cv = defCv(y, cvtype='kfold', rep=50, n_folds=5)
>>> # Define a Random Forest with 200 trees :
>>> clf = defClf(y, clf='rf', n_tree=200, random_state=100)
>>> # Past the two objects inside classify :
>>> clfObj = classify(y, clf=clf, cvtype=cv)
>>> # 2) Define a classifier and a cross-validation inside classify():
>>> clfObj = classify(y, clf = 'rf', cvtype = 'kfold',
>>> clfArg = {'n_tree':200, 'random_state':100},
>>> cvArg = {'rep':50, 'n_folds':5})
>>> # 1) and 2) are equivalent. Then use clfObj.fit() to classify data.
"""
def __str__(self):
return self.lgStr
def fit(self, x, mf=False, center=False, grp=None,
method='bino', n_perm=200, rndstate=0, n_jobs=-1):
"""Apply the classification and cross-validation objects to the array x.
Args:
x: array
Data to classify. Consider that x.shape = (N, M), N is the number
of trials (which should be the length of y). M, the number of
colums, is a supplementar dimension for classifying data. If M = 1,
the data is consider as a single feature. If M > 1, use the
parameter mf to say if x should be consider as a single feature
(mf=False) or multi-features (mf=True)
Kargs:
mf: bool, optional, [def: False]
If mf=False, the returned decoding accuracy (da) will have a
shape of (1, rep) where rep, is the number of repetitions.
This mean that all the features are used together. If mf=True,
da.shape = (M, rep), where M is the number of columns of x.
center: optional, bool, [def: False]
                Normalize features to a zero mean by subtracting then dividing
by the mean. The center parameter should be set to True if the
classifier is a svm.
grp: array, optional, [def: None]
If mf=True, the grp parameter allow to define group of features.
If x.shape = (N, 5) and grp=np.array([0,0,1,2,1]), this mean that
3 groups of features will be considered : (0,1,2)
method: string, optional, [def: 'bino']
                Four methods are implemented to test the statistical significance
of the decoding accuracy :
- 'bino': binomial test
- 'label_rnd': randomly shuffle the labels
- 'full_rnd': randomly shuffle the whole array x
- 'intra_rnd': randomly shuffle x inside each class and each feature
Methods 2, 3 and 4 are based on permutations. The method 2 and 3
should provide similar results. But 4 should be more conservative.
n_perm: integer, optional, [def: 200]
Number of permutations for the methods 2, 3 and 4
rndstate: integer, optional, [def: 0]
                Fix the random state of the machine. Useful to reproduce results.
n_jobs: integer, optional, [def: -1]
                Control the number of jobs to compute the decoding accuracy. If
n_jobs = -1, all the jobs are used.
Return:
da: array
The decoding accuracy of shape n_repetitions x n_features
pvalue: array
Array of associated pvalue of shape n_features
daPerm: array
Array of all the decodings obtained for each permutations of shape
n_perm x n_features
.. rubric:: Footnotes
.. [#f8] `Ojala and Garriga, 2010 <http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_
.. [#f9] `Combrisson and Jerbi, 2015 <http://www.ncbi.nlm.nih.gov/pubmed/25596422/>`_
"""
# Get the true decoding accuracy:
da, x, y, self._ytrue, self._ypred = _fit(x, self._y, self._clf, self._cv.cvr,
mf, grp, center, n_jobs)
nfeat = len(x)
rndstate = np.random.RandomState(rndstate)
score = np.array([np.mean(k) for k in da])
# Get statistics:
# -------------------------------------------------------------
# Binomial :
# -------------------------------------------------------------
if method == 'bino':
pvalue = bino_da2p(y, score)
daPerm = None
pperm = None
# -------------------------------------------------------------
# Permutations :
# -------------------------------------------------------------
elif method.lower().find('_rnd')+1:
# Generate idx tricks :
iteract = product(range(n_perm), range(nfeat))
# -> Shuffle the labels :
if method == 'label_rnd':
y_sh = [rndstate.permutation(y) for k in range(n_perm)]
cvs = Parallel(n_jobs=n_jobs)(delayed(_cvscore)(
x[k], y_sh[i], clone(self._clf), self._cv.cvr[0])
for i, k in iteract)
# -> Full randomization :
elif method == 'full_rnd':
cvs = Parallel(n_jobs=n_jobs)(delayed(_cvscore)(
rndstate.permutation(x[k]), y, clone(self._clf),
self._cv.cvr[0]) for i, k in iteract)
# -> Shuffle intra-class :
elif method == 'intra_rnd':
cvs = Parallel(n_jobs=n_jobs)(delayed(_cvscore)(
x[k][permIntraClass(y, rnd=i), :], y, clone(self._clf),
self._cv.cvr[0]) for i, k in iteract)
# Reconstruct daPerm and get the associated p-value:
daPerm, _, _ = zip(*cvs)
daPerm = np.array(daPerm).reshape(n_perm, nfeat)
pvalue = perm_2pvalue(score, daPerm, n_perm, tail=1)
pperm = pvalue
else:
raise ValueError('No statistical method '+method+' found')
# Get features informations:
try:
if grp is not None:
grp = uorderlst(grp)
self.info.featinfo = self.info._featinfo(self._clf, self._cv, da,
grp=grp, pperm=pperm)
except:
pass
return da.T, pvalue, daPerm
def cm(self, normalize=True):
"""Get the confusion matrix of each feature.
Kargs:
normalize: bool, optional, [def: True]
Normalize or not the confusion matrix
update: bool, optional, [def: True]
If update is True, the data will be re-classified. But, if update
is set to False, and if the methods .fit() or .fit_stat() have been
                run before, the data won't be re-classified. Instead, the labels
previously found will be used to get confusion matrix.
Return:
CM: array
Array of confusion matrix of shape (n_features x n_class x n_class)
"""
# Re-classify data or use the already existing labels :
if not ((hasattr(self, '_ytrue')) and (hasattr(self, '_ypred'))):
raise ValueError("No labels found. Please run .fit()")
else:
# Get variables and compute confusion matrix:
y_pred, y_true = self._ypred, self._ytrue
nfeat, nrep = len(y_true), len(y_true[0])
CM = [np.mean(np.array([confusion_matrix(y_true[k][i], y_pred[
k][i]) for i in range(nrep)]), 0) for k in range(nfeat)]
# Normalize the confusion matrix :
if normalize:
CM = [100*k/k.sum(axis=1)[:, np.newaxis] for k in CM]
return np.array(CM)
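# Minimal usage sketch (illustrative only; the import path and array contents
# are assumptions). It mirrors the class docstring example above:
# import numpy as np
# from brainpipe.classification import classify
# x = np.random.rand(40, 10)                 # 40 trials, 10 features
# y = np.array([0] * 20 + [1] * 20)          # two balanced classes
# clfObj = classify(y, clf='lda', cvtype='kfold', cvArg={'rep': 10, 'n_folds': 5})
# da, pvalue, daPerm = clfObj.fit(x, mf=True, method='label_rnd', n_perm=100)
# CM = clfObj.cm(normalize=True)             # per-feature confusion matrices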
def _fit(x, y, clf, cv, mf, grp, center, n_jobs):
"""Sub function for fitting
"""
# Check the inputs size :
x, y = checkXY(x, y, mf, grp, center)
rep, nfeat = len(cv), len(x)
# Tricks : construct a list of tuple containing the index of
# (repetitions,features) & loop on it. Optimal for parallel computing :
claIdx, listRep, listFeat = list2index(rep, nfeat)
# Run the classification :
cvs = Parallel(n_jobs=n_jobs)(delayed(_cvscore)(
x[k[1]], y, clone(clf), cv[k[0]]) for k in claIdx)
da, y_true, y_pred = zip(*cvs)
# Reconstruct elements :
da = np.array(groupInList(da, listFeat))
y_true = groupInList(y_true, listFeat)
y_pred = groupInList(y_pred, listFeat)
return da, x, y, y_true, y_pred
class generalization(object):
"""Generalize the decoding performance of features.
    The generalization consists of training and testing at different
    time points. It is used to see whether a feature is consistent and
    performs well across different periods of time.
Args:
time: array/list
The time vector of dimension npts
y: array
The vector label of dimension ntrials
x: array
The data to generalize. If x is a 2D array, the dimension of x
            should be (ntrials, npts). If x is a 3D array, the third dimension
            is considered as multi-features. This can be useful to do time
generalization in multi-features.
Kargs:
clf: int / string / classifier object, optional, [def: 0]
Define a classifier. If clf is an integer or a string, the
classifier will be defined inside classify. Otherwise, it is
            possible to define a classifier beforehand with defClf and pass it in clf.
cvtype: string / cross-validation object, optional, [def: None]
Define a cross-validation. If cvtype is None, the diagonal of the
matrix of decoding accuracy will be set at zero. If cvtype is defined,
a cross-validation will be performed on the diagonal. If cvtype is a
string, the cross-validation will be defined inside classify.
Otherwise, it is possible to define a cross-validation before with
defCv and past it in cvtype.
        clfArg: dictionary, optional, [def: {}]
            This dictionary can be used to define supplementary arguments for the
            classifier. See the documentation of defClf.
        cvArg: dictionary, optional, [def: {}]
            This dictionary can be used to define supplementary arguments for the
            cross-validation. See the documentation of defCv.
Return:
An array of dimension (npts, npts) containing the decoding accuracy. The y
axis is the training time and the x axis is the testing time (also known
as "generalization time")
"""
    def __init__(self, time, y, x, clf='lda', cvtype=None, clfArg={},
                 cvArg={}):
pass
def __new__(self, time, y, x, clf='lda', cvtype=None, clfArg={},
cvArg={}):
self.y = np.ravel(y)
self.time = time
# Define clf if it's not defined :
if isinstance(clf, (int, str)):
clf = defClf(y, clf=clf, **clfArg)
self.clf = clf
# Define cv if it's not defined :
if isinstance(cvtype, str) and (cvtype is not None):
cvtype = defCv(y, cvtype=cvtype, rep=1, **cvArg)
self.cv = cvtype
if isinstance(cvtype, list):
cvtype = cvtype[0]
# Check the size of x:
x = np.atleast_3d(x)
npts, ntrials = len(time), len(y)
        if x.shape[0] != npts:
raise ValueError('First dimension of x must be '+str(npts))
        if x.shape[1] != ntrials:
raise ValueError('Second dimension of x must be '+str(ntrials))
da = np.zeros([npts, npts])
# Training dimension
for k in range(npts):
xx = x[k, ...]
# Testing dimension
for i in range(npts):
xy = x[i, ...]
# If cv is defined, do a cv on the diagonal
if (k == i) and (cvtype is not None):
da[i, k] = _cvscore(xx, y, clf, self.cv.cvr[0])[0]/100
# If cv is not defined, let the diagonal at zero
elif (k == i) and (cvtype is None):
pass
else:
da[i, k] = accuracy_score(y, clf.fit(xx, y).predict(xy))
return 100*da
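# Usage sketch for the time generalization above (illustrative only; the
# variable names and shapes are hypothetical and follow the docstring):
# time = np.arange(50)                         # npts time samples
# x = np.random.rand(len(time), len(y), 3)     # npts x ntrials x nfeatures
# da_matrix = generalization(time, y, x, clf='lda')
# da_matrix has shape (npts, npts) and holds the decoding accuracies obtained
# by training at one time point and testing at another, as in the loop above.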
| gpl-3.0 |
GCantergiani/centrality-measure-lth-model | src/ltc_centrality.py | 1 | 4257 | import networkx as nx
import pandas as pd
import sys
import numpy as np
FILE_NETWORK_RETWEET_WEIGHT = "./higgs-retweet_network.edgelist"
ALERNATIVE_BOOSTRAP_NEIGHBORS_ELECTION = False
PRINT_STEPS = True
def generate_graph():
df_retweets = pd.read_csv(FILE_NETWORK_RETWEET_WEIGHT, sep=' ', names = ['source', 'target','weight'])
# Create first directed graph
G1 = nx.DiGraph()
for idx,row in df_retweets.iterrows():
G1.add_edge(row['target'], row['source'], weight= row['weight'])
# Create second directed graph
G2 = nx.DiGraph()
for idx,row in df_retweets.iterrows():
G2.add_edge(row['source'], row['target'], weight= row['weight'])
return G1, G2
def add_plurality_attribute(G1, G2, nodes):
for n in nodes:
if(G1.in_degree(n) == 0):
plurality = sys.maxsize
else:
sum_weight = 0
for g2_node_source in G2.neighbors(n):
sum_weight = sum_weight + G1.get_edge_data(g2_node_source,n)['weight']
plurality = int(sum_weight/2) + 1
G1.node[n]['plurality'] = plurality
return G1
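# Worked example of the plurality rule above (numbers are hypothetical): a node
# that receives retweet edges with weights 3, 2 and 2 has sum_weight = 7, so
# plurality = int(7 / 2) + 1 = 4, i.e. it activates only once the group carries
# a strict majority of its incoming weight. Nodes without incoming edges get
# sys.maxsize and can therefore never be activated by others.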
def write_result(nodes, df_ltc):
df_centrality = pd.DataFrame(nodes, columns=["node"])
df_centrality["lineal_threshold"] = df_ltc
df_centrality.to_csv('lineal_threshold_centrality_retweets.csv', index = False)
def main():
unique_nodes = []
lineal_threshold_to_pandas = []
# Read graphs
G1, G2 = generate_graph()
for node in G1.nodes_iter():
unique_nodes.append(node)
    # Remove duplicate nodes
unique_nodes = list(set(unique_nodes))
# Add plurality attribute
G1 = add_plurality_attribute(G1, G2, unique_nodes)
    # For each unique node
for idx, n in enumerate(unique_nodes):
if PRINT_STEPS:
print('Node: {0}'.format(n))
first_step_per_node = True
nodes_to_add_group = []
neighbors = []
if(ALERNATIVE_BOOSTRAP_NEIGHBORS_ELECTION):
neighbors.extend(G1.neighbors(n))
else:
neighbors.extend(G1.neighbors(n))
neighbors.extend(G2.neighbors(n))
group = []
group.append(n)
group.extend(neighbors)
if PRINT_STEPS:
print('I: 0 ; Neighbors: {0} ; count: {1}'.format(neighbors, len(neighbors)))
depth_level = 0
while( first_step_per_node or (len(nodes_to_add_group) >= 1) ):
first_step_per_node = False
neighbors.extend(nodes_to_add_group)
group.extend(nodes_to_add_group)
nodes_to_add_group = []
if PRINT_STEPS:
print('\t neighbors: {1}'.format(n, neighbors))
dispersion = []
vei = []
for v in neighbors:
vei.extend(G1.neighbors(v))
dispersion = list(set(vei) - set(group))
if PRINT_STEPS:
print('\t dispersion {0} '.format(dispersion))
for n_sub_level in dispersion:
plurality = G1.node[n_sub_level]['plurality']
if PRINT_STEPS:
print('\t \t Reach node {0} | plurality {1}'.format(n_sub_level,plurality))
group_influce = 0
for node_group in group:
if(G1.get_edge_data(node_group,n_sub_level)):
group_influce = group_influce + G1.get_edge_data(node_group,n_sub_level)['weight']
if PRINT_STEPS:
print('\t \t group {0} | influce {1}'.format(group, group_influce))
if(group_influce >= plurality):
nodes_to_add_group.append(n_sub_level)
if PRINT_STEPS:
print('\t \t \t new group {0} '.format(nodes_to_add_group))
print('{0} ; {1} ; {2}'.format(n, depth_level, len(neighbors)))
print()
depth_level = depth_level +1
lineal_threshold_to_pandas.append(len(neighbors) + 1)
write_result(unique_nodes, lineal_threshold_to_pandas)
if __name__ == "__main__":
main() | mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/cluster/mean_shift_.py | 42 | 15514 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,
n_jobs=1):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile),
n_jobs=n_jobs)
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
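# Usage sketch (added; not part of the scikit-learn source): on large datasets
# the bandwidth is typically estimated on a subsample and then passed on, e.g.
# bw = estimate_bandwidth(X, quantile=0.2, n_samples=500, random_state=0)
# ms = MeanShift(bandwidth=bw)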
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_mean_shift.py for an example.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=n_jobs)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=n_jobs).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth,
n_jobs=n_jobs).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
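# Small worked example of the binning above (values chosen for illustration):
# with bin_size=1.0 the points [[0.1, 0.1], [0.2, 0.3], [5.0, 5.1]] fall into
# bins (0, 0), (0, 0) and (5, 5); with min_bin_freq=2 only the (0, 0) bin
# survives, so get_bin_seeds would return a single seed at [0., 0.].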
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
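# Minimal usage sketch (added for illustration; it mirrors the scikit-learn
# examples rather than this file):
# from sklearn.datasets import make_blobs
# X, _ = make_blobs(n_samples=500, centers=3, cluster_std=0.6, random_state=0)
# bw = estimate_bandwidth(X, quantile=0.2, n_samples=200)
# ms = MeanShift(bandwidth=bw, bin_seeding=True).fit(X)
# print(len(np.unique(ms.labels_)), 'clusters found')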
| mit |
mananshah99/game-ai | battleship-hm.py | 1 | 8326 | # Battleship AI
import os
import sys
import random
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
#np.random.seed(0)
BLANK = '.'
HIT = 'h'
MISS = 'm'
w = 10
h = 10
ships = [2, 3, 3, 4, 5]
ships_left = [2, 3, 3, 4, 5]
board = [[BLANK for x in range(w)] for y in range(h)]
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def guess_empty():
x = random.randint(0,9)
y = random.randint(0,9)
while board[x][y] != BLANK:
x = random.randint(0,9)
y = random.randint(0,9)
return x, y
def ok(ship_size, x, y):
# return pair: horizontal_ok, vertical_ok
h_ok = True
v_ok = True
for i in range(x, x + ship_size):
if i > w-1: # too big
h_ok = False
elif board[i][y] == MISS: # already missed
h_ok = False
for i in range(y, y + ship_size):
if i > h-1: # too big
v_ok = False
elif board[x][i] == MISS: # already missed
v_ok = False
return h_ok, v_ok
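# Worked example of ok() on an empty 10x10 board (added for illustration): a
# ship of length 3 anchored at (x=8, y=5) spans cells (8..10, 5) in the first
# direction, which runs past index 9, but spans (8, 5..7) in the second
# direction, which fits; so ok(3, 8, 5) returns (False, True).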
def guess(random=True, plot=True, interactive=True, title=""):
#if not any(HIT in row for row in board): # no hits, guess randomly
# return guess_empty()
# else:
# check if pursuing a ship
# use probabilities in heatmap
hm = heatmap()
hm = np.array(hm)
if not random:
# absolute max i, j
i, j = np.unravel_index(hm.argmax(), hm.shape)
else:
# random choice i, j
t = np.reshape(hm, (w*h,))
s = np.sum(t)
t = [i / float(s) for i in t]
choice = np.random.choice(w * h, p=t)
i = (choice / h)
j = (choice % w)
if plot:
plot_heatmap(hm, title)
if interactive:
feedback = str(raw_input('Guess is (%d, %d). Feedback: '%(i, j)))
if feedback == 'H':
board[i][j] = HIT
elif feedback == 'M':
board[i][j] = MISS
else:
feedback = feedback.split(' ')
board[i][j] = HIT
shipno = int(feedback[1])
ships_left.remove(shipno)
print str(ships_left)
else:
return i, j
def plot_heatmap(a, title):
plt.imshow(a, cmap='hot', interpolation='nearest')
plt.title(title)
for y in range(a.shape[0]):
for x in range(a.shape[1]):
plt.text(x, y, '%d' % a[y, x],
horizontalalignment='center',
verticalalignment='center',
)
plt.pause(1e-5)
plt.cla()
def print_heatmap(a):
print('\n'.join([''.join(['{:4}'.format(item) for item in row]) for row in a]))
def heatmap():
# for all hits, try to make ships out of them
# avoid all misses
# do this until you can't anymore or 6000 iterations are up
hit_indices = []
miss_indices = []
for i in range(0, w):
for j in range(0, h):
if board[i][j] == HIT:
hit_indices.append((i, j))
elif board[i][j] == MISS:
miss_indices.append((i, j))
# where can we place the ships with differing lengths?
# we don't want all possible *boards*, we want all possible *ship combos*
    # i.e. we might be wrong about one placement, but that shouldn't affect the overall positioning
# so we can base this on the miss indices
places = []
for ship_len in ships_left:
for i in range(0, w):
for j in range(0, h):
tup = ok(ship_len, i, j)
if tup[0] or tup[1]:
# tuple is ship_length, coordinates, tuple of horizontal/vertical
c = (i, j)
t = [ship_len, c, tup]
places.append(t)
heatmap = [[0 for x in range(w)] for y in range(h)]
for p in places:
l = p[0]
c = p[1]
t = p[2]
# check if we have a hit in this path already
# if so, multiply path by a factor
h_hit = 1
v_hit = 1
if t[0]: # horizontal
for i in range(c[0], c[0] + l):
if board[i][c[1]] == HIT:
h_hit = 20
if t[1]: # vertical
for i in range(c[1], c[1] + l):
if board[c[0]][i] == HIT:
v_hit = 20
if t[0]: # horizontal
for i in range(c[0], c[0] + l):
heatmap[i][c[1]] += h_hit * 1
if t[1]: # vertical
for i in range(c[1], c[1] + l):
heatmap[c[0]][i] += v_hit * 1
# no repeat guesses
# no guesses at missed points
for a in list(set(hit_indices) | set(miss_indices)):
heatmap[a[0]][a[1]] = 0
X = h-1
Y = w-1
neighbors = lambda x, y : [(x2, y2) for x2 in range(x-1, x+2)
for y2 in range(y-1, y+2)
if (-1 < x <= X and
-1 < y <= Y and
(x != x2 or y != y2) and
(0 <= x2 <= X) and
(0 <= y2 <= Y))]
# weight near the hits
for a in hit_indices:
tmp = neighbors(a[0], a[1])
for b in tmp:
heatmap[b[0]][b[1]] = heatmap[b[0]][b[1]] * 1.5
# weight away from the center -- no one places ships there
heatmap[5][5] = heatmap[5][5] / float(1.5)
for a in neighbors(5, 5):
heatmap[a[0]][a[1]] = heatmap[a[0]][a[1]] / float(1.5)
return heatmap
def make_board(visualize_board):
#Loop through each ship
ship_positions = set()
for SHIP_SIZE in [5, 4, 3, 3, 2]:
isValid = False
newPositions = set()
#Keep picking random ship positions until one works
while (not isValid):
isHorizontal = np.random.uniform(0, 1) > .5
if (isHorizontal):
row = np.random.randint(10)
col = np.random.randint(10 - SHIP_SIZE + 1)
ship_left = row * 10 + col
newPositions = set(range(ship_left, ship_left + SHIP_SIZE))
else:
row = np.random.randint(10 - SHIP_SIZE + 1)
col = np.random.randint(10)
newPositions = set([(row + i) * 10 + col for i in range(SHIP_SIZE)])
if (not newPositions.intersection(ship_positions)):
isValid = True
ship_positions = ship_positions.union(newPositions)
if visualize_board:
for r in range(10):
out = ""
for c in range(10):
if r * 10 + c in ship_positions:
out += "x "
else:
out += "* "
print(out)
print("\n")
return ship_positions
def human_test(plot=False):
while True:
guess(random=False, plot=plot, interactive=True)
def auto_test(log=True, show_grid = True, show_plot = True):
ship_positions = make_board(show_grid)
ps = []
for i in ship_positions:
p = (i / h, i % w)
ps.append(p)
num = 0
while len(ps) > 0:
move = guess(random=False, plot=show_plot, interactive=False, title="Move " + str(num) + " | Remaining: " + str(len(ps)))
i = move[0]
j = move[1]
t = (move in ps)
if t:
board[i][j] = HIT
ps.remove(move)
else:
board[i][j] = MISS
num += 1
if log:
if t:
print bcolors.OKGREEN + "GUESS: (%d, %d)"%(i, j) + "\tREMAINING: " + str(len(ps)) + "\tMOVES: " + str(num) + bcolors.ENDC
else:
print bcolors.FAIL + "GUESS: (%d, %d)"%(i, j) + "\tREMAINING: " + str(len(ps)) + "\tMOVES: " + str(num) + bcolors.ENDC
if num > 100:
sys.exit(0)
return num
def run_stats():
ls = []
for i in tqdm(range(0, 100)):
v = auto_test(log=False, show_grid = False, show_plot = False)
ls.append(v)
board = [[BLANK for x in range(w)] for y in range(h)]
ls = np.array(ls)
print np.mean(ls), np.std(ls), np.min(ls), np.max(ls)
plt.boxplot(ls)
plt.show()
def run_game():
human_test(plot=True)
if __name__ == "__main__":
run_game()
| mit |
DailyActie/Surrogate-Model | examples/sklearn_feature.py | 1 | 1451 | # MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <[email protected]>
# License: MIT License
# Create: 2016-12-02
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
iris = load_iris()
X, y = iris.data, iris.target
X.shape
X_new = SelectKBest(chi2, k=2).fit_transform(X, y)
X_new.shape
| mit |
thilaire/missionBoard | docs/compR3.py | 1 | 1076 |
from matplotlib import pyplot as plt
from math import sqrt
from operator import itemgetter
# data
#data = [0, 24, 49, 75, 79, 103, 127, 150, 120, 142, 164, 186, 190, 210, 230, 249]
data = [0, 9, 23, 32, 45, 54, 65, 73, 90, 97, 107, 113, 122, 128, 136, 142]
# sorted list of tuple (data, value to get)
i = sorted(zip(data, range(16)), key=itemgetter(0))
print(i)
# diff to get the intervals
intervals = [i.pop(0)]
prevd = intervals[0][0]
for d in i:
intervals.append(((d[0]+prevd)//2, d[1]))
prevd = d[0]
print(intervals)
inter, sw = list(zip(*intervals))
print("{" + ", ".join(str(v) for v in inter) + "}")
print("{" + ", ".join(str(v) for v in sw) + "}")
def test(x):
ind = 8
lvl = 8
while lvl > 1:
lvl = lvl // 2
if x < inter[ind]:
ind -= lvl
else: #elif x > inter[ind]:
ind += lvl
return sw[ind-1] if x<inter[ind] else sw[ind]
# test if our test function is right
for v,d in enumerate(data):
for i in range(-2,3):
if test(d+i) == v:
pass
# print("test(%d+%d)=%d"%(d, i, v))
else:
print(u"\u26A0" + "!! test(%d) != %d" % (d+i, v))
| gpl-3.0 |
flightgong/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
cerrno/neurokernel | examples/timing_connectome/timing_connectome_demo_gpu.py | 1 | 23130 | #!/usr/bin/env python
"""
Create and run multiple empty LPUs to time data reception throughput.
Notes
-----
* Requires connectivity matrix in Document S2 from
http://dx.doi.org/10.1016/j.cub.2015.03.021
* Requires CUDA 7.0 when using MPS because of multi-GPU support.
* The maximum allowed number of open file descriptors must be sufficiently high.
"""
import argparse
import glob
import itertools
import numbers
import os
import re
import sys
import time
import warnings
try:
import cudamps
except ImportError:
mps_avail = False
else:
mps_avail = True
import dill
import lmdb
from mpi4py import MPI
import networkx as nx
import numpy as np
import pandas as pd
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import pymetis
import twiggy
from neurokernel.all_global_vars import all_global_vars
from neurokernel.core_gpu import CTRL_TAG, GPOT_TAG, SPIKE_TAG, Manager, Module
from neurokernel.pattern import Pattern
from neurokernel.plsel import Selector, SelectorMethods
from neurokernel.tools.logging import setup_logger
class MyModule(Module):
"""
Empty module class.
This module class doesn't do anything in its execution step apart from
transmit/receive dummy data. All spike ports are assumed to
produce/consume data at every step.
"""
def __init__(self, sel, sel_in, sel_out,
sel_gpot, sel_spike, data_gpot, data_spike,
columns=['interface', 'io', 'type'],
ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
id=None, device=None,
routing_table=None, rank_to_id=None,
debug=False, time_sync=False, cache_file='cache.db'):
if data_gpot is None:
data_gpot = np.zeros(SelectorMethods.count_ports(sel_gpot), float)
if data_spike is None:
data_spike = np.zeros(SelectorMethods.count_ports(sel_spike), int)
super(MyModule, self).__init__(sel, sel_in, sel_out,
sel_gpot, sel_spike, data_gpot, data_spike,
columns,
ctrl_tag, gpot_tag, spike_tag,
id, device,
routing_table, rank_to_id,
debug, time_sync)
self.cache_file = cache_file
self.pm['gpot'][self.interface.out_ports().gpot_ports(tuples=True)] = 1.0
self.pm['spike'][self.interface.out_ports().spike_ports(tuples=True)] = 1
def pre_run(self):
self.log_info('running code before body of worker %s' % self.rank)
# Initialize _out_port_dict and _in_port_dict attributes:
env = lmdb.open(self.cache_file, map_size=10**10)
with env.begin() as txn:
data = txn.get(self.id)
if data is not None:
data = dill.loads(data)
self.log_info('loading cached port data')
self._out_ids = data['_out_ids']
self._out_ranks = [self.rank_to_id[:i] for i in self._out_ids]
self._out_port_dict = data['_out_port_dict']
self._out_port_dict_ids = data['_out_port_dict_ids']
for out_id in self._out_ids:
if not isinstance(self._out_port_dict_ids['gpot'][out_id],
gpuarray.GPUArray):
self._out_port_dict_ids['gpot'][out_id] = \
gpuarray.to_gpu(self._out_port_dict_ids['gpot'][out_id])
if not isinstance(self._out_port_dict_ids['spike'][out_id],
gpuarray.GPUArray):
self._out_port_dict_ids['spike'][out_id] = \
gpuarray.to_gpu(self._out_port_dict_ids['spike'][out_id])
self._in_ids = data['_in_ids']
self._in_ranks = [self.rank_to_id[:i] for i in self._in_ids]
self._in_port_dict = data['_in_port_dict']
self._in_port_dict_ids = data['_in_port_dict_ids']
for in_id in self._in_ids:
if not isinstance(self._in_port_dict_ids['gpot'][in_id],
gpuarray.GPUArray):
self._in_port_dict_ids['gpot'][in_id] = \
gpuarray.to_gpu(self._in_port_dict_ids['gpot'][in_id])
if not isinstance(self._in_port_dict_ids['spike'][in_id],
gpuarray.GPUArray):
self._in_port_dict_ids['spike'][in_id] = \
gpuarray.to_gpu(self._in_port_dict_ids['spike'][in_id])
else:
self.log_info('no cached port data found - generating')
self._init_port_dicts()
with env.begin(write=True) as txn:
data = dill.dumps({'_in_ids': self._in_ids,
'_in_port_dict': self._in_port_dict,
'_in_port_dict_ids': self._in_port_dict_ids,
'_out_ids': self._out_ids,
'_out_port_dict': self._out_port_dict,
'_out_port_dict_ids': self._out_port_dict_ids})
txn.put(self.id, data)
# Initialize GPU transmission buffers:
self._init_comm_bufs()
# Start timing the main loop:
if self.time_sync:
self.intercomm.isend(['start_time', (self.rank, time.time())],
dest=0, tag=self._ctrl_tag)
self.log_info('sent start time to manager')
class MyManager(Manager):
"""
Manager that can use Multi-Process Service.
Parameters
----------
use_mps : bool
If True, use Multi-Process Service so that multiple MPI processes
can use the same GPUs concurrently.
"""
def __init__(self, use_mps=False):
super(MyManager, self).__init__()
if use_mps:
self._mps_man = cudamps.MultiProcessServiceManager()
else:
self._mps_man = None
def spawn(self, part_map):
"""
Spawn MPI processes for and execute each of the managed targets.
Parameters
----------
part_map : dict
Maps GPU ID to list of target MPI ranks.
"""
if self._is_parent:
# The number of GPUs over which the targets are partitioned may not
# exceed the actual number of supported devices:
n_part_gpus = len(part_map.keys())
n_avail_gpus = 0
drv.init()
for i in xrange(drv.Device.count()):
# MPS requires Tesla/Quadro GPUs with compute capability 3.5 or greater:
if mps_avail:
d = drv.Device(i)
if d.compute_capability() >= (3, 5) and \
re.search('Tesla|Quadro', d.name()):
n_avail_gpus += 1
else:
n_avail_gpus += 1
if n_part_gpus > n_avail_gpus:
raise RuntimeError('partition size (%s) exceeds '
'number of available GPUs (%s)' % \
(n_part_gpus, n_avail_gpus))
# Start MPS control daemons (this assumes that the available GPUs
# are numbered consecutively from 0 onwards - as are the elements of
# part_map.keys()):
if self._mps_man:
self._mps_man.start()
self.log_info('starting MPS')
            # Find the path to the mpi_backend.py script (which should be in
            # the same directory as this module):
import neurokernel.mpi
parent_dir = os.path.dirname(neurokernel.mpi.__file__)
mpi_backend_path = os.path.join(parent_dir, 'mpi_backend.py')
            # Check that the union of the ranks in the partition corresponds exactly to
# those of the targets added to the manager:
n_targets = len(self._targets.keys())
if set(self._targets.keys()) != \
set([t for t in itertools.chain.from_iterable(part_map.values())]):
raise ValueError('partition must contain all target ranks')
# Invert mapping of GPUs to MPI ranks:
rank_to_gpu_map = {rank:gpu for gpu in part_map.keys() for rank in part_map[gpu]}
# Set MPS pipe directory:
info = MPI.Info.Create()
if self._mps_man:
mps_dir = self._mps_man.get_mps_dir(self._mps_man.get_mps_ctrl_proc())
info.Set('env', 'CUDA_MPS_PIPE_DIRECTORY=%s' % mps_dir)
# Spawn processes:
self._intercomm = MPI.COMM_SELF.Spawn(sys.executable,
args=[mpi_backend_path],
maxprocs=n_targets,
info=info)
# First, transmit twiggy logging emitters to spawned processes so
# that they can configure their logging facilities:
for i in self._targets.keys():
self._intercomm.send(twiggy.emitters, i)
# Next, serialize the routing table ONCE and then transmit it to all
# of the child nodes:
self._intercomm.bcast(self.routing_table, root=MPI.ROOT)
# Transmit class to instantiate, globals required by the class, and
# the constructor arguments; the backend will wait to receive
# them and then start running the targets on the appropriate nodes.
req = MPI.Request()
r_list = []
for i in self._targets.keys():
target_globals = all_global_vars(self._targets[i])
# Serializing atexit with dill appears to fail in virtualenvs
# sometimes if atexit._exithandlers contains an unserializable function:
if 'atexit' in target_globals:
del target_globals['atexit']
data = (self._targets[i], target_globals, self._kwargs[i])
r_list.append(self._intercomm.isend(data, i))
# Need to clobber data to prevent all_global_vars from
# including it in its output:
del data
req.Waitall(r_list)
def __del__(self):
# Shut down MPS daemon when the manager is cleaned up:
if self._mps_man:
pid = self._mps_man.get_mps_ctrl_proc()
self.log_info('stopping MPS control daemon %i' % pid)
self._mps_man.stop(pid)
def gen_sels(conn_mat, scaling=1):
"""
Generate port selectors for LPUs in benchmark test.
Parameters
----------
conn_mat : numpy.ndarray
Square array containing numbers of directed spiking port connections
between LPUs (which correspond to the row and column indices).
scaling : int
Scaling factor; multiply all connection numbers by this value.
Returns
-------
mod_sels : dict of tuples
Ports in module interfaces; the keys are the module IDs and the values are tuples
        containing the respective selectors for all ports, all input ports, all
        output ports, all graded potential ports, and all spiking ports.
pat_sels : dict of tuples
Ports in pattern interfaces; the keys are tuples containing the two
module IDs connected by the pattern and the values are pairs of tuples
containing the respective selectors for all source ports, all
destination ports, all input ports connected to the first module,
all output ports connected to the first module, all graded potential ports
connected to the first module, all spiking ports connected to the first
module, all input ports connected to the second module,
all output ports connected to the second module, all graded potential ports
connected to the second module, and all spiking ports connected to the second
module.
"""
conn_mat = np.asarray(conn_mat)
r, c = conn_mat.shape
assert r == c
n_lpu = r
assert scaling > 0 and isinstance(scaling, numbers.Integral)
conn_mat *= scaling
# Construct selectors describing the ports exposed by each module:
mod_sels = {}
for i in xrange(n_lpu):
lpu_id = 'lpu%s' % i
# Structure ports as
# /lpu_id/in_or_out/spike_or_gpot/other_lpu_id/[0:n_spike]
# where in_or_out is relative to module i:
sel_in_gpot = Selector('')
sel_out_gpot = Selector('')
sel_in_spike = \
Selector(','.join(['/lpu%i/in/spike/lpu%i/[0:%i]' % (i, j, n) for j, n in \
enumerate(conn_mat[:, i]) if (j != i and n != 0)]))
sel_out_spike = \
Selector(','.join(['/lpu%i/out/spike/lpu%i/[0:%i]' % (i, j, n) for j, n in \
enumerate(conn_mat[i, :]) if (j != i and n != 0)]))
mod_sels[lpu_id] = (Selector.union(sel_in_gpot, sel_in_spike,
sel_out_gpot, sel_out_spike),
Selector.union(sel_in_gpot, sel_in_spike),
Selector.union(sel_out_gpot, sel_out_spike),
Selector.union(sel_in_gpot, sel_out_gpot),
Selector.union(sel_in_spike, sel_out_spike))
# Construct selectors describing the ports connected by each pattern:
pat_sels = {}
for i, j in itertools.combinations(xrange(n_lpu), 2):
lpu_i = 'lpu%s' % i
lpu_j = 'lpu%s' % j
# The pattern's input ports are labeled "../out.." because that selector
# describes the output ports of the connected module's interface:
sel_in_gpot_i = Selector('')
sel_out_gpot_i = Selector('')
sel_in_gpot_j = Selector('')
sel_out_gpot_j = Selector('')
sel_in_spike_i = Selector('/%s/out/spike/%s[0:%i]' % (lpu_i, lpu_j,
conn_mat[i, j]))
sel_out_spike_i = Selector('/%s/in/spike/%s[0:%i]' % (lpu_i, lpu_j,
conn_mat[j, i]))
sel_in_spike_j = Selector('/%s/out/spike/%s[0:%i]' % (lpu_j, lpu_i,
conn_mat[j, i]))
sel_out_spike_j = Selector('/%s/in/spike/%s[0:%i]' % (lpu_j, lpu_i,
conn_mat[i, j]))
# The order of these two selectors is important; the individual 'from'
# and 'to' ports must line up properly for Pattern.from_concat to
# produce the right pattern:
sel_from = Selector.add(sel_in_gpot_i, sel_in_spike_i,
sel_in_gpot_j, sel_in_spike_j)
sel_to = Selector.add(sel_out_gpot_j, sel_out_spike_j,
sel_out_gpot_i, sel_out_spike_i)
# Exclude scenarios where the "from" or "to" selector is empty (and
# therefore cannot be used to construct a pattern):
if len(sel_from) and len(sel_to):
pat_sels[(lpu_i, lpu_j)] = \
(sel_from, sel_to,
Selector.union(sel_in_gpot_i, sel_in_spike_i),
Selector.union(sel_out_gpot_i, sel_out_spike_i),
Selector.union(sel_in_gpot_i, sel_out_gpot_i),
Selector.union(sel_in_spike_i, sel_out_spike_i),
Selector.union(sel_in_gpot_j, sel_in_spike_j),
Selector.union(sel_out_gpot_j, sel_out_spike_j),
Selector.union(sel_in_gpot_j, sel_out_gpot_j),
Selector.union(sel_in_spike_j, sel_out_spike_j))
return mod_sels, pat_sels
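# Illustrative sketch (not part of the original benchmark script): shows how
# gen_sels() above could be called for two LPUs connected by 5 spiking ports
# in each direction. The matrix values and the helper name are made up for
# demonstration only.
def _example_gen_sels():
    conn_mat = np.asarray([[0, 5],
                           [5, 0]])
    mod_sels, pat_sels = gen_sels(conn_mat, scaling=1)
    # mod_sels['lpu0'] is a tuple of (all, input, output, gpot, spike)
    # selectors exposed by lpu0; pat_sels[('lpu0', 'lpu1')] holds the
    # selectors needed to build the Pattern connecting the two LPUs.
    return mod_sels, pat_sels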
def partition(mat, n_parts):
"""
Partition a directed graph described by a weighted connectivity matrix.
Parameters
----------
mat : numpy.ndarray
Square weighted connectivity matrix for a directed graph.
n_parts : int
Number of partitions.
Returns
-------
part_map : dict of list
Dictionary of partitions. The dict keys are the partition identifiers,
and the values are the lists of nodes in each partition.
"""
# Combine weights of directed edges to obtain undirected graph:
mat = mat+mat.T
# Convert matrix into METIS-compatible form:
g = nx.from_numpy_matrix(np.array(mat, dtype=[('weight', int)]))
n = g.number_of_nodes()
e = g.number_of_edges()
xadj = np.empty(n+1, int)
adjncy = np.empty(2*e, int)
eweights = np.empty(2*e, int)
end_node = 0
xadj[0] = 0
for i in g.node:
for j, a in g.edge[i].items():
adjncy[end_node] = j
eweights[end_node] = a['weight']
end_node += 1
xadj[i+1] = end_node
# Compute edge-cut partition:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
cutcount, part_vert = pymetis.part_graph(n_parts, xadj=xadj,
adjncy=adjncy, eweights=eweights)
# Find nodes in each partition:
part_map = {}
for i, p in enumerate(set(part_vert)):
ind = np.where(np.array(part_vert) == p)[0]
part_map[p] = ind
return part_map
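# Illustrative sketch (not part of the original benchmark script): partitions
# a small 4-LPU connectivity matrix over 2 GPUs with partition() above. The
# weights and the helper name are made up; with these weights METIS should
# group the strongly coupled pairs {0, 1} and {2, 3} together.
def _example_partition():
    mat = np.asarray([[0, 10, 1, 1],
                      [10, 0, 1, 1],
                      [1, 1, 0, 10],
                      [1, 1, 10, 0]])
    # part_map maps each partition id to the array of node indices it owns.
    part_map = partition(mat, 2)
    return part_map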
def emulate(conn_mat, scaling, n_gpus, steps, use_mps, cache_file='cache.db'):
"""
Benchmark inter-LPU communication throughput.
Each LPU is configured to use a different local GPU.
Parameters
----------
conn_mat : numpy.ndarray
Square array containing numbers of directed spiking port connections
between LPUs (which correspond to the row and column indices).
scaling : int
Scaling factor; multiply all connection numbers by this value.
n_gpus : int
Number of GPUs over which to partition the emulation.
steps : int
Number of steps to execute.
use_mps : bool
Use Multi-Process Service if True.
    cache_file : str
        Name of the LMDB database used to cache generated port data and the
        routing table between runs.
    Returns
    -------
    average_step_sync_time : float
        Average per-step synchronization time in seconds.
    total_time : float
        Total wall-clock time in seconds, including manager setup.
    main_time : float
        Wall-clock time in seconds spent in the main emulation loop.
    emulation_time : float
        Emulation time in seconds as measured by the manager.
"""
# Time everything starting with manager initialization:
start_all = time.time()
# Set up manager:
man = MyManager(use_mps)
# Generate selectors for configuring modules and patterns:
mod_sels, pat_sels = gen_sels(conn_mat, scaling)
# Partition nodes in connectivity matrix:
part_map = partition(conn_mat, n_gpus)
# Set up modules such that those in each partition use that partition's GPU:
ranks = set([rank for rank in itertools.chain.from_iterable(part_map.values())])
rank_to_gpu_map = {rank:gpu for gpu in part_map for rank in part_map[gpu]}
for i in ranks:
lpu_i = 'lpu%s' % i
sel, sel_in, sel_out, sel_gpot, sel_spike = mod_sels[lpu_i]
man.add(MyModule, lpu_i, sel, sel_in, sel_out, sel_gpot, sel_spike,
None, None, ['interface', 'io', 'type'],
CTRL_TAG, GPOT_TAG, SPIKE_TAG, device=rank_to_gpu_map[i],
time_sync=True)
# Set up connections between module pairs:
env = lmdb.open(cache_file, map_size=10**10)
with env.begin() as txn:
data = txn.get('routing_table')
if data is not None:
man.log_info('loading cached routing table')
routing_table = dill.loads(data)
# Don't replace man.routing_table outright because its reference is
# already in the dict of named args to transmit to the child MPI process:
for c in routing_table.connections:
man.routing_table[c] = routing_table[c]
else:
man.log_info('no cached routing table found - generating')
for lpu_i, lpu_j in pat_sels.keys():
sel_from, sel_to, sel_in_i, sel_out_i, sel_gpot_i, sel_spike_i, \
sel_in_j, sel_out_j, sel_gpot_j, sel_spike_j = pat_sels[(lpu_i, lpu_j)]
pat = Pattern.from_concat(sel_from, sel_to,
from_sel=sel_from, to_sel=sel_to, data=1, validate=False)
pat.interface[sel_in_i, 'interface', 'io'] = [0, 'in']
pat.interface[sel_out_i, 'interface', 'io'] = [0, 'out']
pat.interface[sel_gpot_i, 'interface', 'type'] = [0, 'gpot']
pat.interface[sel_spike_i, 'interface', 'type'] = [0, 'spike']
pat.interface[sel_in_j, 'interface', 'io'] = [1, 'in']
pat.interface[sel_out_j, 'interface', 'io'] = [1, 'out']
pat.interface[sel_gpot_j, 'interface', 'type'] = [1, 'gpot']
pat.interface[sel_spike_j, 'interface', 'type'] = [1, 'spike']
man.connect(lpu_i, lpu_j, pat, 0, 1, compat_check=False)
with env.begin(write=True) as txn:
txn.put('routing_table', dill.dumps(man.routing_table))
man.spawn(part_map)
start_main = time.time()
man.start(steps)
man.wait()
stop_main = time.time()
return man.average_step_sync_time, (time.time()-start_all), (stop_main-start_main), \
(man.stop_time-man.start_time)
if __name__ == '__main__':
import neurokernel.mpi_relaunch
conn_mat_file = 's2.xlsx'
scaling = 1
max_steps = 50
n_gpus = 4
use_mps = False
parser = argparse.ArgumentParser()
parser.add_argument('--debug', default=False,
dest='debug', action='store_true',
help='Enable debug mode.')
parser.add_argument('-l', '--log', default='none', type=str,
help='Log output to screen [file, screen, both, or none; default:none]')
parser.add_argument('-c', '--conn_mat_file', default=conn_mat_file, type=str,
help='Connectivity matrix Excel file [default: %s]' % conn_mat_file)
parser.add_argument('-k', '--scaling', default=scaling, type=int,
help='Connection number scaling factor [default: %s]' % scaling)
parser.add_argument('-m', '--max_steps', default=max_steps, type=int,
help='Maximum number of steps [default: %s]' % max_steps)
parser.add_argument('-g', '--gpus', default=n_gpus, type=int,
help='Number of GPUs [default: %s]' % n_gpus)
parser.add_argument('-p', '--use_mps', action='store_true',
help='Use Multi-Process Service [default: False]')
args = parser.parse_args()
file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
screen = True
logger = setup_logger(file_name=file_name, screen=screen,
mpi_comm=MPI.COMM_WORLD,
multiline=True)
    conn_mat = pd.read_excel(args.conn_mat_file,
                             sheetname='Connectivity Matrix').astype(int).as_matrix()
print (args.gpus,)+emulate(conn_mat, args.scaling, args.gpus, args.max_steps, args.use_mps)
| bsd-3-clause |
wuxue/altanalyze | misopy/sashimi_plot/sashimi_plot.py | 1 | 11215 | # -*- mode: python; -*-
##
## sashimi_plot
##
## Utility for visualizing RNA-Seq densities along gene models and
## for plotting MISO output
##
import os
import sys
import glob
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
import matplotlib
# Use PDF backend
try: matplotlib.use("pdf")
except Exception: pass
from scipy import *
from numpy import *
import pysam
import shelve
import misopy
import misopy.gff_utils as gff_utils
import misopy.pe_utils as pe_utils
from misopy.parse_csv import csv2dictlist_raw
from misopy.samples_utils import load_samples
from misopy.sashimi_plot.Sashimi import Sashimi
from misopy.sashimi_plot.plot_utils.samples_plotter import SamplesPlotter
from misopy.sashimi_plot.plot_utils.plotting import *
from misopy.sashimi_plot.plot_utils.plot_gene import plot_density_from_file
import matplotlib.pyplot as plt
from matplotlib import rc
def plot_bf_dist(bf_filename, settings_filename, output_dir,
max_bf=1e12):
"""
Plot a Bayes factor distribution from a .miso_bf file.
"""
if not bf_filename.endswith(".miso_bf"):
print "WARNING: %s does not end in .miso_bf, are you sure it is the " \
"output of a MISO samples comparison?" %(bf_filename)
# Load BF data
data, h = csv2dictlist_raw(bf_filename)
plot_name = os.path.basename(bf_filename)
sashimi_obj = Sashimi(plot_name, output_dir,
settings_filename=settings_filename)
settings = sashimi_obj.settings
# Setup the figure
sashimi_obj.setup_figure()
# Matrix of bayes factors and delta psi pairs
bfs_and_deltas = []
for event in data:
bf = event['bayes_factor']
delta_psi = event['diff']
if type(bf) == str and "," in bf:
print "WARNING: %s is a multi-isoform event, skipping..." \
%(event)
continue
else:
# Impose upper limit on Bayes factor
bf = min(1e12, float(bf))
delta_psi = float(delta_psi)
bfs_and_deltas.append([bf, delta_psi])
bfs_and_deltas = array(bfs_and_deltas)
num_events = len(bfs_and_deltas)
print "Loaded %d event comparisons." %(num_events)
output_filename = sashimi_obj.output_filename
print "Plotting Bayes factors distribution"
print " - Output filename: %s" %(output_filename)
bf_thresholds = settings["bf_thresholds"]
bar_color = settings["bar_color"]
min_bf_thresh = min(bf_thresholds)
num_events_used = sum(bfs_and_deltas[:, 0] >= min_bf_thresh)
for thresh in bf_thresholds:
if type(thresh) != int:
print "Error: BF thresholds must be integers."
#sys.exit(1)
print "Using BF thresholds: "
print bf_thresholds
print "Using bar color: %s" %(bar_color)
plot_cumulative_bars(bfs_and_deltas[:, 0],
bf_thresholds,
bar_color=bar_color,
logged=True)
plt.xticks(bf_thresholds)
c = 1
plt.xlim([bf_thresholds[0] - c, bf_thresholds[-1] + c])
plt.title("Bayes factor distributions\n(using %d/%d events)" \
%(num_events_used, num_events))
plt.xlabel("Bayes factor thresh.")
plt.ylabel("No. events")
sashimi_obj.save_plot()
def plot_event(event_name, pickle_dir, settings_filename,
output_dir,
no_posteriors=False,
plot_title=None,
plot_label=None):
"""
Visualize read densities across the exons and junctions
of a given MISO alternative RNA processing event.
Also plots MISO estimates and Psi values.
"""
if not os.path.isfile(settings_filename):
print "Error: settings filename %s not found." %(settings_filename)
#sys.exit(1)
if not os.path.isdir(pickle_dir):
print "Error: event pickle directory %s not found." %(pickle_dir)
#sys.exit(1)
# Retrieve the full pickle filename
genes_filename = os.path.join(pickle_dir,
"genes_to_filenames.shelve")
# Check that file basename exists
if len(glob.glob("%s*" %(genes_filename))) == 0:
raise Exception, "Cannot find file %s. Are you sure the events " \
"were indexed with the latest version of index_gff.py?" \
%(genes_filename)
event_to_filenames = shelve.open(genes_filename)
if event_name not in event_to_filenames:
raise Exception, "Event %s not found in pickled directory %s. " \
"Are you sure this is the right directory for the event?" \
%(event_name, pickle_dir)
pickle_filename = event_to_filenames[event_name]
if pickle_dir not in pickle_filename:
import string
pickle_filename = string.replace(pickle_filename,'\\','/')
pickle_filename = pickle_dir + string.split(pickle_filename,'sashimi_index')[1]
import string
#pickle_filename = string.replace(pickle_filename,' 1','')
if no_posteriors:
print "Asked to not plot MISO posteriors."
plot_density_from_file(settings_filename, pickle_filename, event_name,
output_dir,
no_posteriors=no_posteriors,
plot_title=plot_title,
plot_label=plot_label)
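# Illustrative sketch only (not part of MISO/AltAnalyze): the event name and
# paths below are hypothetical placeholders; a real call needs an event ID
# from the GFF annotation, the directory produced by index_gff.py, a sashimi
# plot settings file, and an output directory.
def _example_plot_event():
    plot_event('example_event_id',
               './sashimi_index',
               './sashimi_plot_settings.txt',
               './sashimi_plots')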
def plot_insert_len(insert_len_filename,
settings_filename,
output_dir):
"""
Plot insert length distribution.
"""
if not os.path.isfile(settings_filename):
print "Error: settings filename %s not found." %(settings_filename)
#sys.exit(1)
plot_name = os.path.basename(insert_len_filename)
sashimi_obj = Sashimi(plot_name, output_dir,
settings_filename=settings_filename)
settings = sashimi_obj.settings
num_bins = settings["insert_len_bins"]
output_filename = sashimi_obj.output_filename
sashimi_obj.setup_figure()
s = plt.subplot(1, 1, 1)
print "Plotting insert length distribution..."
print " - Distribution file: %s" %(insert_len_filename)
print " - Output plot: %s" %(output_filename)
insert_dist, params = pe_utils.load_insert_len(insert_len_filename)
mean, sdev, dispersion, num_pairs \
= pe_utils.compute_insert_len_stats(insert_dist)
print "min insert: %.1f" %(min(insert_dist))
print "max insert: %.1f" %(max(insert_dist))
plt.title("%s (%d read-pairs)" \
%(plot_name,
num_pairs),
fontsize=10)
plt.hist(insert_dist, bins=num_bins, color='k',
edgecolor="#ffffff", align='mid')
axes_square(s)
ymin, ymax = s.get_ylim()
plt.text(0.05, 0.95, "$\mu$: %.1f\n$\sigma$: %.1f\n$d$: %.1f" \
%(round(mean, 2),
round(sdev, 2),
round(dispersion, 2)),
horizontalalignment='left',
verticalalignment='top',
bbox=dict(edgecolor='k', facecolor="#ffffff",
alpha=0.5),
fontsize=10,
transform=s.transAxes)
plt.xlabel("Insert length (nt)")
plt.ylabel("No. read pairs")
sashimi_obj.save_plot()
def greeting():
print "Sashimi plot: Visualize spliced RNA-Seq reads along gene models. " \
"Part of the MISO (Mixture of Isoforms model) framework."
print "See --help for usage.\n"
print "Manual available at: http://genes.mit.edu/burgelab/miso/docs/sashimi.html\n"
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--plot-insert-len", dest="plot_insert_len", nargs=2, default=None,
help="Plot the insert length distribution from a given insert length (*.insert_len) "
"filename. Second argument is a settings file name.")
parser.add_option("--plot-bf-dist", dest="plot_bf_dist", nargs=2, default=None,
help="Plot Bayes factor distributon. Takes the arguments: "
"(1) Bayes factor filename (*.miso_bf) filename, "
"(2) a settings filename.")
parser.add_option("--plot-event", dest="plot_event", nargs=3, default=None,
help="Plot read densities and MISO inferences for a given alternative event. "
"Takes the arguments: (1) event name (i.e. the ID= of the event based on MISO gff3 "
"annotation file, (2) directory where indexed GFF annotation is (output of "
"index_gff.py), (3) path to plotting settings file.")
parser.add_option("--no-posteriors", dest="no_posteriors", default=False, action="store_true",
help="If given this argument, MISO posterior estimates are not plotted.")
parser.add_option("--plot-title", dest="plot_title", default=None, nargs=1,
help="Title of plot: a string that will be displayed at top of plot. Example: " \
"--plot-title \"My favorite gene\".")
parser.add_option("--plot-label", dest="plot_label", default=None, nargs=1,
help="Plot label. If given, plot will be saved in the output directory as " \
"the plot label ending in the relevant extension, e.g. <plot_label>.pdf. " \
"Example: --plot-label my_gene")
parser.add_option("--output-dir", dest="output_dir", nargs=1, default=None,
help="Output directory.")
(options, args) = parser.parse_args()
if options.plot_event is None:
greeting()
#sys.exit(1)
if options.output_dir == None:
print "Error: need --output-dir"
#sys.exit(1)
output_dir = os.path.abspath(os.path.expanduser(options.output_dir))
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
no_posteriors = options.no_posteriors
plot_title = options.plot_title
plot_label = options.plot_label
if options.plot_insert_len != None:
insert_len_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[0]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[1]))
plot_insert_len(insert_len_filename, settings_filename, output_dir)
if options.plot_bf_dist != None:
bf_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[0]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[1]))
plot_bf_dist(bf_filename, settings_filename, output_dir)
if options.plot_event != None:
event_name = options.plot_event[0]
pickle_dir = os.path.abspath(os.path.expanduser(options.plot_event[1]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_event[2]))
plot_event(event_name, pickle_dir, settings_filename, output_dir,
no_posteriors=no_posteriors,
plot_title=plot_title,
plot_label=plot_label)
if __name__ == '__main__':
main()
| apache-2.0 |
JPFrancoia/scikit-learn | sklearn/externals/joblib/testing.py | 23 | 3042 | """
Helper for testing.
"""
import sys
import warnings
import os.path
import re
import subprocess
import threading
from sklearn.externals.joblib._compat import PY3_OR_LATER
def warnings_to_stdout():
""" Redirect all warnings to stdout.
"""
showwarning_orig = warnings.showwarning
def showwarning(msg, cat, fname, lno, file=None, line=0):
showwarning_orig(msg, cat, os.path.basename(fname), line, sys.stdout)
warnings.showwarning = showwarning
#warnings.simplefilter('always')
try:
from nose.tools import assert_raises_regex
except ImportError:
# For Python 2.7
try:
from nose.tools import assert_raises_regexp as assert_raises_regex
except ImportError:
# for Python 2.6
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
def check_subprocess_call(cmd, timeout=1, stdout_regex=None,
stderr_regex=None):
"""Runs a command in a subprocess with timeout in seconds.
Also checks returncode is zero, stdout if stdout_regex is set, and
stderr if stderr_regex is set.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def kill_process():
proc.kill()
timer = threading.Timer(timeout, kill_process)
try:
timer.start()
stdout, stderr = proc.communicate()
if PY3_OR_LATER:
stdout, stderr = stdout.decode(), stderr.decode()
if proc.returncode != 0:
message = (
'Non-zero return code: {0}.\nStdout:\n{1}\n'
'Stderr:\n{2}').format(
proc.returncode, stdout, stderr)
raise ValueError(message)
if (stdout_regex is not None and
not re.search(stdout_regex, stdout)):
raise ValueError(
"Unexpected stdout: {0!r} does not match:\n{1!r}".format(
stdout_regex, stdout))
if (stderr_regex is not None and
not re.search(stderr_regex, stderr)):
raise ValueError(
"Unexpected stderr: {0!r} does not match:\n{1!r}".format(
stderr_regex, stderr))
finally:
timer.cancel()
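# Illustrative sketch (not part of joblib): runs a trivial Python command in a
# subprocess and checks its output against a regex using
# check_subprocess_call() above. The helper name is made up.
def _example_check_subprocess_call():
    check_subprocess_call([sys.executable, '-c', 'print("ok")'],
                          timeout=5, stdout_regex=r'ok')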
| bsd-3-clause |
DynamicGravitySystems/DGP | examples/plot2_prototype.py | 1 | 4218 | import os
import sys
import uuid
import logging
import datetime
import traceback
from PyQt5 import QtCore
import PyQt5.QtWidgets as QtWidgets
import PyQt5.Qt as Qt
import numpy as np
from pandas import Series, DatetimeIndex
from matplotlib.axes import Axes
from matplotlib.patches import Rectangle
from matplotlib.dates import date2num
os.chdir('..')
import dgp.lib.project as project
# from dgp.gui.plotting.plotter2 import FlightLinePlot
class MockDataChannel:
def __init__(self, series, label):
self._series = series
self.label = label
self.uid = uuid.uuid4().__str__()
def series(self):
return self._series
def plot(self, *args):
pass
class PlotExample(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('Plotter Testing')
self.setBaseSize(Qt.QSize(600, 600))
self._flight = project.Flight(None, 'test')
self._plot = FlightLinePlot(self._flight, parent=self)
self._plot.set_mode(grab=True)
print("Plot: ", self._plot)
# self.plot.figure.canvas.mpl_connect('pick_event', lambda x: print(
# "Pick event handled"))
# self.plot.mgr = StackedAxesManager(self.plot.figure, rows=2)
# self._toolbar = NavToolbar(self.plot, parent=self)
# self._toolbar.actions()[0] = QtWidgets.QAction("Reset View")
# self._toolbar.actions()[0].triggered.connect(lambda x: print(
# "Action 0 triggered"))
self.tb = self._plot.get_toolbar()
plot_layout = QtWidgets.QVBoxLayout()
plot_layout.addWidget(self._plot)
plot_layout.addWidget(self.tb)
c_widget = QtWidgets.QWidget()
c_widget.setLayout(plot_layout)
self.setCentralWidget(c_widget)
plot_layout.addWidget(QtWidgets.QPushButton("Reset"))
# toolbar = self.plot.get_toolbar(self)
self.show()
def plot_sin(self):
idx = DatetimeIndex(freq='5S', start=datetime.datetime.now(),
periods=1000)
ser = Series([np.sin(x)*3 for x in np.arange(0, 100, 0.1)], index=idx)
self.plot.mgr.add_series(ser)
self.plot.mgr.add_series(-ser)
ins_0 = self.plot.mgr.add_inset_axes(0) # type: Axes
ins_0.plot(ser.index, ser.values)
x0, x1 = ins_0.get_xlim()
width = (x1 - x0) * .5
y0, y1 = ins_0.get_ylim()
height = (y1 - y0) * .5
# Draw rectangle patch on inset axes - proof of concept to add inset
# locator when zoomed in on large data set.
ax0 = self.plot.mgr[0][0] # type: Axes
rect = Rectangle((date2num(idx[0]), 0), width, height,
edgecolor='black',
linewidth=2, alpha=.5, fill='red')
rect.set_picker(True)
patch = ins_0.add_patch(rect) # type: Rectangle
# Future idea: Add click+drag to view patch to pan in main plot
def update_rect(ax: Axes):
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
patch.set_x(x0)
patch.set_y(y0)
height = y1 - y0
width = x1 - x0
patch.set_width(width)
patch.set_height(height)
ax.draw_artist(patch)
self.plot.draw()
ax0.callbacks.connect('xlim_changed', update_rect)
ax0.callbacks.connect('ylim_changed', update_rect)
self.plot.draw()
ins_1 = self.plot.mgr.add_inset_axes(1)
def excepthook(type_, value, traceback_):
"""This allows IDE to properly display unhandled exceptions which are
otherwise silently ignored as the application is terminated.
Override default excepthook with
>>> sys.excepthook = excepthook
See: http://pyqt.sourceforge.net/Docs/PyQt5/incompatibilities.html
"""
traceback.print_exception(type_, value, traceback_)
QtCore.qFatal('')
if __name__ == '__main__':
sys.excepthook = excepthook
app = QtWidgets.QApplication(sys.argv)
_log = logging.getLogger()
_log.addHandler(logging.StreamHandler(sys.stdout))
_log.setLevel(logging.DEBUG)
window = PlotExample()
# window.plot_sin()
sys.exit(app.exec_())
| apache-2.0 |
nelango/ViralityAnalysis | model/lib/sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| mit |
droundy/deft | papers/fuzzy-fmt/nm_plot_histdata.py | 2 | 2260 | #!/usr/bin/python2
#Run this program from /deft/papers/fuzzy-fmt by entering ./nm_plot_histdata.py [filename.dat]
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
import sys
parser = argparse.ArgumentParser(description='Creates a plot.', epilog="stuff...")
parser.add_argument('filedat', metavar='datafile', type=str, nargs='*',
help='file with data to plot')
args=parser.parse_args()
markers = {
0.1: '^',
0.5: 'o',
0.01: '+',
0.05: 'v',
}
mc_constant = {}
percent_neg = {}
estimated_uncertainty = {}
mean_error = {}
error_width = {}
mean_error_uncertainty = {}
for f in args.filedat:
print 'Reading file', f
thisdata = np.loadtxt(f)
gw = thisdata[0,12]
mc_prefactor = thisdata[0,11]
if gw not in mc_constant.keys():
mc_constant[gw] = []
percent_neg[gw] = []
estimated_uncertainty[gw] = []
mean_error[gw] = []
error_width[gw] = []
mean_error_uncertainty[gw] = []
mc_constant[gw].append(thisdata[0,10])
rel_error=thisdata[:,9]
num_seeds = len(thisdata)
percent_neg[gw].append(100.0*len(rel_error[rel_error<0]) / float(len(rel_error)))
estimated_uncertainty[gw].append(100.0*len(rel_error[rel_error<0]) / float(len(rel_error))/np.sqrt(len(rel_error)))
mean_error[gw].append(rel_error.mean())
error_width[gw].append(rel_error.std())
mean_error_uncertainty[gw].append(rel_error.std()/np.sqrt(len(rel_error)))
for gw in mc_constant.keys():
plt.errorbar(mc_constant[gw], percent_neg[gw], estimated_uncertainty[gw], fmt=markers[gw], label='gw=%g' % gw)
plt.legend()
plt.axhspan(60, 40, color='green', alpha=0.15, lw=0)
plt.axhspan(49.5, 50.5, color='black', alpha=0.15, lw=0)
plt.title('Histogram results with mc_prefactor %g' % mc_prefactor)
plt.ylabel('percent of negative relerrors')
plt.xlabel('mc_constant')
plt.figure()
plt.axhspan(-1e-4, 1e-4, color='green', alpha=0.15, lw=0)
plt.axhline(0, color='green')
for gw in mc_constant.keys():
plt.errorbar(mc_constant[gw], mean_error[gw], mean_error_uncertainty[gw], fmt=markers[gw], label='gw=%g' % gw)
plt.legend()
plt.title('Histogram results with mc_prefactor %g' % mc_prefactor)
plt.ylabel('mean relative error')
plt.xlabel('mc_constant')
plt.show()
| gpl-2.0 |
Akshay0724/scikit-learn | sklearn/naive_bayes.py | 20 | 30830 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, check_consistent_length
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
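# Illustrative sketch (not part of scikit-learn): mirrors how predict_log_proba
# above normalizes joint log-likelihoods in log space via logsumexp. The jll
# values and the helper name are made up.
def _example_log_proba_normalization():
    jll = np.array([[-3.0, -1.0],
                    [-0.5, -4.0]])          # unnormalized log P(c) + log P(x|c)
    log_prob_x = logsumexp(jll, axis=1)     # log P(x) per sample
    log_proba = jll - np.atleast_2d(log_prob_x).T
    # np.exp(log_proba) now has rows summing to 1.
    return log_proba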
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like, shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB(priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB(priors=None)
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, priors=None):
self.priors = priors
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like, shape (n_samples, n_features)
            New data points to incorporate into the running mean and variance.
        sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
        _refit : bool, optional (default=False)
            If True, act as though this were the first time we called
            _partial_fit (i.e., throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
n_classes = len(self.classes_)
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
                # Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if priors.sum() != 1.0:
raise ValueError('The sum of the priors should be 1.')
                # Check that the priors are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
# Update if only no priors is provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
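# Illustrative sketch (not part of scikit-learn): demonstrates that the
# streaming update in GaussianNB._update_mean_variance above reproduces the
# full-batch statistics when a second chunk of data is folded in. The data
# values and the helper name are made up.
def _example_update_mean_variance():
    X_a = np.array([[1.0], [2.0], [3.0]])
    X_b = np.array([[4.0], [5.0]])
    mu_ab, var_ab = GaussianNB._update_mean_variance(
        X_a.shape[0], X_a.mean(axis=0), X_a.var(axis=0), X_b)
    X_full = np.vstack([X_a, X_b])
    # mu_ab matches X_full.mean(axis=0) and var_ab matches X_full.var(axis=0).
    return mu_ab, var_ab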
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_) -
np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes], optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,), optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
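# --- Illustrative sketch (not part of the library) --------------------------
# The three private hooks above implement, for the multinomial model,
#     feature_log_prob_[c, i] = log(N_ci + alpha)
#                               - log(sum_i N_ci + alpha * n_features)
# where N_ci is the (possibly sample-weighted) count of feature i in class c.
# The helper below re-derives that estimate with plain NumPy and checks it
# against a fitted estimator; the name `_demo_multinomial_smoothing` is
# illustrative only and is not part of scikit-learn.
def _demo_multinomial_smoothing(alpha=1.0):
    import numpy as np
    from sklearn.naive_bayes import MultinomialNB

    rng = np.random.RandomState(0)
    X = rng.randint(5, size=(20, 6)).astype(np.float64)
    y = rng.randint(3, size=20)

    clf = MultinomialNB(alpha=alpha).fit(X, y)

    # Raw per-class feature counts, then Laplace/Lidstone smoothing.
    counts = np.array([X[y == c].sum(axis=0) for c in clf.classes_])
    smoothed = counts + alpha
    expected = np.log(smoothed) - np.log(smoothed.sum(axis=1, keepdims=True))

    assert np.allclose(expected, clf.feature_log_prob_)
    return expected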
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional (default=0.0)
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,], optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
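# --- Illustrative sketch (not part of the library) --------------------------
# BernoulliNB._joint_log_likelihood above relies on the identity
#     sum_i [x_i * log(p_i) + (1 - x_i) * log(1 - p_i)]
#         = x . (log(p) - log(1 - p)) + sum_i log(1 - p_i)
# so a single (sparse-friendly) dot product suffices.  The helper below,
# whose name is illustrative only, checks the two forms agree on a fitted
# estimator; the arg-max of this quantity is what predict() returns.
def _demo_bernoulli_jll():
    import numpy as np
    from sklearn.naive_bayes import BernoulliNB

    rng = np.random.RandomState(0)
    X = rng.randint(2, size=(30, 8))
    y = rng.randint(2, size=30)

    clf = BernoulliNB().fit(X, y)
    log_p = clf.feature_log_prob_
    log_q = np.log(1 - np.exp(log_p))  # log(1 - p), finite thanks to smoothing

    # Direct per-feature sum versus the rearranged single-dot-product form.
    direct = X @ log_p.T + (1 - X) @ log_q.T + clf.class_log_prior_
    fast = X @ (log_p - log_q).T + log_q.sum(axis=1) + clf.class_log_prior_

    assert np.allclose(direct, fast)
    return fast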
| bsd-3-clause |
datapythonista/pandas | pandas/tests/window/test_pairwise.py | 4 | 8745 | import warnings
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.algorithms import safe_sort
class TestPairwise:
# GH 7738
@pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()])
def test_no_flex(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame methods (which do not call flex_binary_moment())
result = f(pairwise_frames)
tm.assert_index_equal(result.index, pairwise_frames.columns)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().cov(pairwise=True),
lambda x: x.expanding().corr(pairwise=True),
lambda x: x.rolling(window=3).cov(pairwise=True),
lambda x: x.rolling(window=3).corr(pairwise=True),
lambda x: x.ewm(com=3).cov(pairwise=True),
lambda x: x.ewm(com=3).corr(pairwise=True),
],
)
def test_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame with itself, pairwise=True
# note that we may construct the 1st level of the MI
# in a non-monotonic way, so compare accordingly
result = f(pairwise_frames)
tm.assert_index_equal(
result.index.levels[0], pairwise_frames.index, check_names=False
)
tm.assert_numpy_array_equal(
safe_sort(result.index.levels[1]),
safe_sort(pairwise_frames.columns.unique()),
)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().cov(pairwise=False),
lambda x: x.expanding().corr(pairwise=False),
lambda x: x.rolling(window=3).cov(pairwise=False),
lambda x: x.rolling(window=3).corr(pairwise=False),
lambda x: x.ewm(com=3).cov(pairwise=False),
lambda x: x.ewm(com=3).corr(pairwise=False),
],
)
def test_no_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame with itself, pairwise=False
result = f(pairwise_frames)
tm.assert_index_equal(result.index, pairwise_frames.index)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x, y: x.expanding().cov(y, pairwise=True),
lambda x, y: x.expanding().corr(y, pairwise=True),
lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
lambda x, y: x.ewm(com=3).corr(y, pairwise=True),
],
)
def test_pairwise_with_other(
self, pairwise_frames, pairwise_target_frame, pairwise_other_frame, f
):
# DataFrame with another DataFrame, pairwise=True
result = f(pairwise_frames, pairwise_other_frame)
tm.assert_index_equal(
result.index.levels[0], pairwise_frames.index, check_names=False
)
tm.assert_numpy_array_equal(
safe_sort(result.index.levels[1]),
safe_sort(pairwise_other_frame.columns.unique()),
)
expected = f(pairwise_target_frame, pairwise_other_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x, y: x.expanding().cov(y, pairwise=False),
lambda x, y: x.expanding().corr(y, pairwise=False),
lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
lambda x, y: x.ewm(com=3).corr(y, pairwise=False),
],
)
def test_no_pairwise_with_other(self, pairwise_frames, pairwise_other_frame, f):
# DataFrame with another DataFrame, pairwise=False
result = (
f(pairwise_frames, pairwise_other_frame)
if pairwise_frames.columns.is_unique
else None
)
if result is not None:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
# we can have int and str columns
expected_index = pairwise_frames.index.union(pairwise_other_frame.index)
expected_columns = pairwise_frames.columns.union(
pairwise_other_frame.columns
)
tm.assert_index_equal(result.index, expected_index)
tm.assert_index_equal(result.columns, expected_columns)
else:
with pytest.raises(ValueError, match="'arg1' columns are not unique"):
f(pairwise_frames, pairwise_other_frame)
with pytest.raises(ValueError, match="'arg2' columns are not unique"):
f(pairwise_other_frame, pairwise_frames)
@pytest.mark.parametrize(
"f",
[
lambda x, y: x.expanding().cov(y),
lambda x, y: x.expanding().corr(y),
lambda x, y: x.rolling(window=3).cov(y),
lambda x, y: x.rolling(window=3).corr(y),
lambda x, y: x.ewm(com=3).cov(y),
lambda x, y: x.ewm(com=3).corr(y),
],
)
def test_pairwise_with_series(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame with a Series
result = f(pairwise_frames, Series([1, 1, 3, 8]))
tm.assert_index_equal(result.index, pairwise_frames.index)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame, Series([1, 1, 3, 8]))
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
result = f(Series([1, 1, 3, 8]), pairwise_frames)
tm.assert_index_equal(result.index, pairwise_frames.index)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(Series([1, 1, 3, 8]), pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_corr_freq_memory_error(self):
# GH 31789
s = Series(range(5), index=date_range("2020", periods=5))
result = s.rolling("12H").corr(s)
expected = Series([np.nan] * 5, index=date_range("2020", periods=5))
tm.assert_series_equal(result, expected)
    def test_cov_multiindex(self):
# GH 34440
columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])
index = range(3)
df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns)
result = df.ewm(alpha=0.1).cov()
index = MultiIndex.from_product([range(3), list("ab"), list("xy"), list("AB")])
columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])
expected = DataFrame(
np.vstack(
(
np.full((8, 8), np.NaN),
np.full((8, 8), 32.000000),
np.full((8, 8), 63.881919),
)
),
index=index,
columns=columns,
)
tm.assert_frame_equal(result, expected)
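# --- Illustrative sketch (not part of the test suite) ------------------------
# The pairwise=True cases above produce a "long" result whose row index is a
# MultiIndex of (original index label, column label), while pairwise=False
# keeps one row per original row.  A minimal, self-contained illustration
# (the function name is made up for this sketch):
def _demo_pairwise_shapes():
    df = DataFrame(
        np.random.RandomState(0).normal(size=(6, 3)), columns=["A", "B", "C"]
    )

    pairwise = df.rolling(window=3).cov(pairwise=True)
    flat = df.rolling(window=3).cov(pairwise=False)

    assert pairwise.shape == (6 * 3, 3)  # (n_rows * n_columns, n_columns)
    assert flat.shape == (6, 3)          # (n_rows, n_columns)
    assert isinstance(pairwise.index, MultiIndex)
    return pairwise, flat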
| bsd-3-clause |
jdhp-sap/sap-cta-data-pipeline | utils/plot_noise_histogram.py | 2 | 3908 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make statistics on the noise of benchmark FITS files.
"""
import common_functions as common
import argparse
from matplotlib import pyplot as plt
import os
import math
import numpy as np
from datapipe.io import images
def parse_fits_files(fits_file_name_list, progress_bar=True):
fits_noise_list = []
for file_index, file_name in enumerate(fits_file_name_list):
# Read the input file #########
fits_images_dict, fits_metadata_dict = images.load_benchmark_images(file_name)
# Get images ##################
input_img = fits_images_dict["input_image"]
reference_img = fits_images_dict["reference_image"]
pure_noise_image = input_img - reference_img
fits_noise_list.append(pure_noise_image)
# Progress bar ################
if progress_bar:
num_files = len(fits_file_name_list)
relative_steps = math.ceil(num_files / 100.)
if (file_index % relative_steps) == 0:
progress_str = "{:.2f}% ({}/{})".format((file_index + 1)/num_files * 100,
file_index + 1,
num_files)
print(progress_str)
return fits_noise_list
if __name__ == '__main__':
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on the noise of benchmark FITS files.")
parser.add_argument("--output", "-o", default=None,
metavar="FILE",
help="The output file path")
parser.add_argument("--title", default=None,
metavar="STRING",
help="The title of the plot")
parser.add_argument("--logy", "-L", action="store_true", default=False,
help="Use a logaritmic scale on the Y axis")
parser.add_argument("--quiet", "-q", action="store_true",
help="Don't show the plot, just save it")
parser.add_argument("--notebook", action="store_true",
help="Notebook mode")
parser.add_argument("fileargs", nargs=1, metavar="DIRECTORY",
help="The directory containing input images (FITS files) used to make statistics on the noise.")
args = parser.parse_args()
title = args.title
logy = args.logy
quiet = args.quiet
notebook = args.notebook
input_directory_path = args.fileargs[0]
if args.output is None:
output_file_path = "noise_histogram.pdf"
else:
output_file_path = args.output
# FETCH NOISE #############################################################
# Parse the input directory
fits_file_name_list = common.get_fits_files_list(input_directory_path)
# Parse FITS files
data_list = parse_fits_files(fits_file_name_list, progress_bar=not notebook)
# PLOT STATISTICS #########################################################
if not notebook:
print("Plotting...")
fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(16, 9))
common.plot_hist1d(axis=ax1,
data_list=[np.array(data_list).flatten()],
label_list=[],
logy=logy,
xlabel="Photoelectrons",
xylabel_fontsize=16,
title=title,
linear_xlabel_style=None,
linear_ylabel_style=None,
num_bins=None,
info_box_rms=False,
info_box_std=True)
# Save file and plot ########
if not notebook:
plt.tight_layout()
plt.savefig(output_file_path, bbox_inches='tight')
if not quiet:
plt.show()
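# --- Illustrative sketch (not part of this script) ---------------------------
# The script above needs datapipe's benchmark FITS reader; the helper below
# shows the same "noise = input - reference, then histogram" idea on made-up
# NumPy arrays, so the plotting step can be exercised without any FITS file.
# All names and numbers here are placeholders for the example.
def _demo_noise_histogram(demo_output_path="noise_histogram_demo.pdf"):
    rng = np.random.RandomState(0)
    reference_img = rng.poisson(lam=5.0, size=(48, 48)).astype(float)
    input_img = reference_img + rng.normal(scale=2.0, size=(48, 48))

    pure_noise = (input_img - reference_img).ravel()

    fig, ax = plt.subplots(figsize=(8, 4.5))
    ax.hist(pure_noise, bins=50)
    ax.set_xlabel("Photoelectrons")
    ax.set_ylabel("Pixels")
    ax.set_title("Noise distribution (demo data)")
    fig.savefig(demo_output_path, bbox_inches="tight")
    plt.close(fig)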
| mit |
AnasGhrab/scikit-learn | sklearn/neural_network/rbm.py | 206 | 12292 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
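# --- Illustrative sketch (not part of the library) ---------------------------
# Minimal end-to-end use of the class above on toy binary data: fit with
# persistent contrastive divergence, read hidden activations back with
# transform(), run one Gibbs step, and score the pseudo-likelihood.  The
# function name is illustrative only.
def _demo_bernoulli_rbm():
    rng = np.random.RandomState(0)
    X = (rng.rand(200, 16) > 0.5).astype(np.float64)

    rbm = BernoulliRBM(n_components=4, learning_rate=0.05,
                       batch_size=10, n_iter=5, random_state=0)
    rbm.fit(X)

    hidden = rbm.transform(X)          # P(h=1|v), shape (200, 4)
    resampled = rbm.gibbs(X)           # one Gibbs step, shape (200, 16)
    pseudo_ll = rbm.score_samples(X)   # per-sample pseudo-likelihood proxy

    assert hidden.shape == (200, 4)
    assert resampled.shape == (200, 16)
    return hidden, resampled, pseudo_ll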
| bsd-3-clause |
clarkfitzg/dask | dask/array/numpy_compat.py | 9 | 1606 | import numpy as np
try:
isclose = np.isclose
except AttributeError:
def isclose(*args, **kwargs):
raise RuntimeError("You need numpy version 1.7 or greater to use "
"isclose.")
try:
full = np.full
except AttributeError:
def full(shape, fill_value, dtype=None, order=None):
"""Our implementation of numpy.full because your numpy is old."""
if order is not None:
raise NotImplementedError("`order` kwarg is not supported upgrade "
"to Numpy 1.8 or greater for support.")
return np.multiply(fill_value, np.ones(shape, dtype=dtype),
dtype=dtype)
# Taken from scikit-learn:
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py#L84
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Divide with dtype doesn't work on Python 3
def divide(x1, x2, out=None, dtype=None):
"""Implementation of numpy.divide that works with dtype kwarg.
Temporary compatibility fix for a bug in numpy's version. See
https://github.com/numpy/numpy/issues/3484 for the relevant issue."""
x = np.divide(x1, x2, out)
if dtype is not None:
x = x.astype(dtype)
return x
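# --- Illustrative sketch (not part of dask) ----------------------------------
# Quick sanity check for the `full` fallback above: on a NumPy that already
# provides numpy.full the two code paths agree, dtype included.  The function
# name is made up for this sketch.
def _demo_full_fallback():
    shape, fill_value, dtype = (2, 3), 7, np.float64
    fallback = np.multiply(fill_value, np.ones(shape, dtype=dtype), dtype=dtype)
    assert np.array_equal(fallback, np.full(shape, fill_value, dtype=dtype))
    assert fallback.dtype == np.float64
    return fallback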
| bsd-3-clause |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/utils/tests/test_testing.py | 13 | 3044 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
# This class is taken from numpy 1.7
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
before_filters = sys.modules['warnings'].filters[:]
assert_equal(assert_warns(UserWarning, f), 3)
after_filters = sys.modules['warnings'].filters
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
# Check that the warnings state is unchanged
assert_equal(before_filters, after_filters,
"assert_warns does not preserver warnings state")
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
harshaneelhg/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
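# --- Illustrative addition (not part of the original example) ----------------
# A quick way to also report how many points DBSCAN left as noise (label -1);
# the exact count depends on the generated blobs.
n_noise_ = int(np.sum(labels == -1))
print('Estimated number of noise points: %d' % n_noise_)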
| bsd-3-clause |
weissercn/MLTools | Dalitz_simplified/evaluation_of_optimised_classifiers/dt_Dalitz/dt_Dalitz_evaluation_of_optimised_classifiers.py | 1 | 1172 | import numpy as np
import math
import sys
sys.path.insert(0,'../..')
import os
import classifier_eval_simplified
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
comp_file_list=[]
####################################################################
# Dalitz operaton
####################################################################
for i in range(100):
comp_file_list.append((os.environ['MLToolsDir']+"/Dalitz/dpmodel/data/data.{0}.0.txt".format(i), os.environ['MLToolsDir']+"/Dalitz/dpmodel/data/data.2{0}.1.txt".format(str(i).zfill(2))))
clf = tree.DecisionTreeClassifier('gini','best',46, 100, 1, 0.0, None)
#clf = AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2), learning_rate=0.95,n_estimators=440)
#clf = SVC(C=1.0,gamma=0.0955,probability=True, cache_size=7000)
args=["dalitz_dt","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13)]
#For nn:
#args=["dalitz_nn","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13),71,1]
classifier_eval_simplified.classifier_eval(0,0,args)
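# --- Illustrative note (assumes the positional signature of the scikit-learn
# version this script targets): the DecisionTreeClassifier call above maps to
#     tree.DecisionTreeClassifier(criterion='gini', splitter='best',
#                                 max_depth=46, min_samples_split=100,
#                                 min_samples_leaf=1,
#                                 min_weight_fraction_leaf=0.0,
#                                 max_features=None)
# The keyword form is preferable because it keeps working if the positional
# order ever changes.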
| mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/tri/triplot.py | 8 | 3150 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
def triplot(ax, *args, **kwargs):
"""
    Draw an unstructured triangular grid as lines and/or markers.
The triangulation to plot can be specified in one of two ways;
either::
triplot(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
triplot(x, y, ...)
triplot(x, y, triangles, ...)
triplot(x, y, triangles=triangles, ...)
triplot(x, y, mask=mask, ...)
triplot(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The remaining args and kwargs are the same as for
:meth:`~matplotlib.axes.Axes.plot`.
Return a list of 2 :class:`~matplotlib.lines.Line2D` containing
respectively:
- the lines plotted for triangles edges
- the markers plotted for triangles nodes
**Example:**
.. plot:: mpl_examples/pylab_examples/triplot_demo.py
"""
import matplotlib.axes
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
x, y, edges = (tri.x, tri.y, tri.edges)
# Decode plot format string, e.g., 'ro-'
fmt = ""
if len(args) > 0:
fmt = args[0]
linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)
# Insert plot format string into a copy of kwargs (kwargs values prevail).
kw = kwargs.copy()
for key, val in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if val is not None:
kw[key] = kwargs.get(key, val)
# Draw lines without markers.
# Note 1: If we drew markers here, most markers would be drawn more than
# once as they belong to several edges.
# Note 2: We insert nan values in the flattened edges arrays rather than
# plotting directly (triang.x[edges].T, triang.y[edges].T)
# as it considerably speeds-up code execution.
linestyle = kw['linestyle']
kw_lines = kw.copy()
kw_lines['marker'] = 'None' # No marker to draw.
kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used.
if (linestyle is not None) and (linestyle not in ['None', '', ' ']):
tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
**kw_lines)
else:
tri_lines = ax.plot([], [], **kw_lines)
# Draw markers separately.
marker = kw['marker']
kw_markers = kw.copy()
kw_markers['linestyle'] = 'None' # No line to draw.
if (marker is not None) and (marker not in ['None', '', ' ']):
tri_markers = ax.plot(x, y, **kw_markers)
else:
tri_markers = ax.plot([], [], **kw_markers)
return tri_lines + tri_markers
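# --- Illustrative sketch (not part of matplotlib) -----------------------------
# Minimal use of the function above through the Axes.triplot wrapper: build a
# tiny two-triangle mesh and draw its edges with circle markers.  All data
# here is made up for the example.
def _demo_triplot():
    import numpy as np
    import matplotlib.pyplot as plt

    x = np.array([0.0, 1.0, 0.5, 1.5])
    y = np.array([0.0, 0.0, 1.0, 1.0])
    triangles = np.array([[0, 1, 2], [1, 3, 2]])

    fig, ax = plt.subplots()
    lines_and_markers = ax.triplot(x, y, triangles, 'ko-', lw=1.0)
    ax.set_aspect('equal')
    return fig, lines_and_markers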
| mit |
elijah513/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 103 | 41083 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
            assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
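# --- Illustrative worked example ----------------------------------------------
# Where the expected [0, 0.4, 0.6] row above comes from: the query [2, 2, 2]
# has cityblock distances 2 and 3 to its two nearest training points, whose
# labels are 5 and 4.  With weights='distance' each neighbor contributes 1/d,
# so the class masses 1/2 (label 5) and 1/3 (label 4) normalise to 0.6 and
# 0.4 over classes_ = [1, 4, 5].
def _demo_distance_weighted_proba():
    dists = np.array([2.0, 3.0])   # cityblock distances of the two neighbors
    labels = np.array([5, 4])      # their training labels
    w = 1.0 / dists
    masses = {1: 0.0, 4: w[labels == 4].sum(), 5: w[labels == 5].sum()}
    total = sum(masses.values())
    proba = {k: v / total for k, v in masses.items()}
    assert np.allclose([proba[1], proba[4], proba[5]], [0.0, 0.4, 0.6])
    return proba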
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
                else:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([0.0], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[])
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array.sort()  # sort in place; np.sort alone returns a copy and discards it
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph,
nbrs1.radius_neighbors_graph(X).toarray())
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y) | bsd-3-clause |
kikimaroca/beamtools | beamtools/dev/hybridoscillator.py | 1 | 7285 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 12 21:13:47 2017
@author: cpkmanchee
Simulate pulse propagation in NALM oscillator
Everything is in SI units: m,s,W,J etc.
"""
import numpy as np
import matplotlib.pyplot as plt
from beamtools import upp, h, c
from tqdm import tqdm, trange
import sys
import shutil
import glob
import os
from datetime import datetime
#create save name and folders for output
start_date = datetime.now().strftime("%Y%m%d")
start_time = datetime.now().strftime("%H%M%S")
#output_folder is outside of git repository in: code_folder/Code Output/...
#if file is in code_folder/X/Y/Z, results are in code_folder/Code Output/X/Y/Z
code_folder = '/Users/cpkmanchee/Documents/Code'
output_folder = (code_folder + '/Code Output'
+ os.path.dirname(__file__).split(code_folder)[-1] + '/'
+ os.path.splitext(os.path.basename(__file__))[0] + '_output')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
result_folder = (output_folder + '/'
+ start_date + os.path.splitext(os.path.basename(__file__))[0])
dataset_num = 0
while not not glob.glob((result_folder+'-'+str(dataset_num).zfill(2)+'*')):
dataset_num = dataset_num + 1
result_folder = result_folder + '-' + str(dataset_num).zfill(2)
os.makedirs(result_folder)
filebase = (result_folder + '/' + start_date + '-'
+ start_time + '-' + str(dataset_num).zfill(2))
fileext = '.pkl'
output_num = 0
filename = filebase + 'pulse' + str(output_num).zfill(3) + fileext
shutil.copy(__file__, result_folder + '/' + os.path.basename(__file__))
def savepulse(pulse, name='pulse'):
'''
to be used locally only
all file/folder names must be previously defined
'''
global output_num, filename
while not not glob.glob(filename):
output_num = output_num + 1
filename = filebase + name + str(output_num).zfill(3) + fileext
upp.save_obj(pulse,filename)
def cavity(pulse,auto_z_step=False):
'''Define cavity round trip
NOTE: pulse object is modified!!!
returns:
pulse.At = current pulse profile
output_At = cavity output (outcoupled) profile
'''
pulse.At = upp.grating_pair(pulse, L_g, N_g, AOI_g, loss = ref_loss_g, return_coef = False)
pulse.At = upp.propagate_fiber(pulse,smf1,autodz=auto_z_step)
Ps = np.sum(np.abs(pulse.At)**2)*pulse.dt/tau_rt
ydf1.gain = upp.calc_gain(ydf1,p1P,Ps)
pulse.At = upp.propagate_fiber(pulse,ydf1,autodz=False)
pulse.At = upp.optical_filter(pulse, filter_type='bpf', bandwidth=30E-9, loss=0.06, order=1)
pulse.At = upp.propagate_fiber(pulse,smf2,autodz=auto_z_step)
pulse.At = upp.saturable_abs(pulse,sat_int_sa,d_sa,mod_depth_sa,loss_sa)
pulse.At = upp.optical_filter(pulse, filter_type='bpf', bandwidth=30E-9, loss=0.06, order=2)
pulse.At, output_At = upp.coupler_2x2(pulse,None,tap=50)
return pulse.At, output_At
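# Added illustration (not part of the original script): a single round trip in
# isolation would look like the lines below, where the outcoupled average
# power uses the repetition period tau_rt defined further down in this file.
#
#   intra_At, out_At = cavity(pulse)
#   out_power = np.sum(np.abs(out_At)**2) * pulse.dt / tau_rt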
def run_sim(
pulse, max_iter=100, err_thresh=1E-6,
save_pulse = 1, auto_z_step=False):
if save_pulse is not False:
N = min(max_iter,save_pulse)
savepulse(pulse,name='cavity')
else:
N = max_iter + 1
f,ax = initialize_plot(10)
update_pulse_plot(pulse,f,ax)
t = trange(max_iter, desc='Total progress')
t.set_postfix(str='{:.1e}'.format(0))
for i in t:
input_At = pulse.At
cavity_At, output_At = cavity(pulse, auto_z_step)
update_pulse_plot(pulse,f,ax)
if (i+1)%N == 0:
savepulse(pulse, name='cavity')
savepulse(pulse.copyPulse(output_At), name='output')
power_in = np.abs(input_At)**2
power_out = np.abs(pulse.At)**2
test = check_residuals(power_in,power_out,
integ_err=err_thresh, p2p_err=err_thresh)
if test[0]:
if save_pulse is not False:
savepulse(pulse,name='cavity')
savepulse(pulse.copyPulse(output_At), name='output')
break
t.set_postfix(str='{:.1e},{:.1e}'.format(test[1],test[2]))
if save_pulse is not False:
savepulse(pulse,name='cavity')
savepulse(pulse.copyPulse(output_At), name='output')
def check_residuals(initial, final, integ_err=1E-4, p2p_err=1E-4):
'''Check residuals for covergence test.
Return True if pass. False if fail.
'''
res = (initial-final)
p2p = np.abs(res).max()/initial.max()
integ = (np.sum(np.abs(res)**2)**(1/2))/np.sum(initial)
if p2p < p2p_err and integ < integ_err:
return True,integ,p2p
else:
return False,integ,p2p
def initialize_plot(N,colormap=plt.cm.viridis):
'''Set up plotting figure for sim.'''
plt.ion()
cm = colormap
f, ax = plt.subplots(1,2)
[[a.plot([],[],c=cm(i/(N-1)),zorder=i)[0] for _,i in enumerate(range(N))] for a in ax]
ax[0].set_xlim([-2E-11,2E-11])
ax[0].set_xlabel('Tau (s)')
ax[1].set_xlim([-5E13,5E13])
ax[1].set_xlabel('Omega ($s^{-1}$)')
plt.show()
plt.pause(0.001)
return f,ax
def update_pulse_plot(pulse,fig,ax):
'''Update pulse plot'''
#Update time plots
lines = ax[0].lines
N = len(lines)
i = np.argmin([li.zorder for li in lines])
lines[i].set_data(pulse.time,pulse.getPt())
lines[i].zorder += N
ax[0].set_ylim([0,np.max(pulse.getPt())])
plt.pause(0.001)
lines = ax[1].lines
N = len(lines)
i = np.argmin([li.zorder for li in lines])
lines[i].set_data(np.fft.fftshift(pulse.freq),np.fft.fftshift(pulse.getPf()))
lines[i].zorder += N
ax[1].set_ylim([0,np.max(pulse.getPf())])
plt.pause(0.001)
#Define Pulse Object
pulse = upp.Pulse(1.03E-6)
pulse.initializeGrid(16, 1.5E-9)
T0 = 700E-15
mshape = 1
chirp0 = -4
P_peak = 1E3 #peak power, 10kW-->1ps pulse, 400mW avg @ 40MHz
pulse.At = np.sqrt(P_peak)*(
np.exp(-((1+1j*chirp0)/(2*T0**2))*pulse.time**(2*mshape)))
#folder = ('/Users/cpkmanchee/Documents/Code/Code Output/'
# 'beamtools/beamtools/dev/nalmoscillator_output/20180208nalmoscillator-36/')
#cavity_file= '20180208-200922-36cavity005.pkl'
#pulse = upp.load_obj(folder+cavity_file)
#Define fiber components
smf1 = upp.Fiber(1.0)
smf1.alpha = 0.000576
smf1.beta = np.array([
0.0251222977,
4.5522276126132602e-05,
-5.0542788517531417e-08])*(1E-12)**(np.array([2,3,4]))
smf1.gamma = 0.00045
smf1.core_d = 5.5E-6
smf2 = smf1.copyFiber(length=3.0)
#gain fiber, nufern ysf-HI
ydf1 = upp.FiberGain(0.64, grid_type='rel',z_grid=100)
ydf1.alpha = 0.00345
ydf1.beta = np.array([
0.0251222977,
4.5522276126132602e-05,
-5.0542788517531417e-08])*(1E-12)**(np.array([2,3,4]))
ydf1.gamma = 0.00045
ydf1.sigma_a = np.array([3.04306,0.04966])*1E-24
ydf1.sigma_e = np.array([3.17025,0.59601])*1E-24
ydf1.lambdas = np.array([0.976,1.030])*1E-6
ydf1.core_d = 6.0E-6
ydf1.N = 1.891669E25
#Pump parameters
p1P = 0.4 #pump power, CW
#Cavity
tau_rt = 1/(38.1E6)
#Define grating parameters
L_g = 0.095
N_g = 600
AOI_g = 27
ref_loss_g = 1-(1-0.3)**4
#Saturable absorber parameters. Mimic 1040-15-500fs from BATOP
sat_int_sa = 0.5 #uJ/cm**2 = 1E-2 J/m**2
d_sa = smf1.core_d #~6um diameter fiber
mod_depth_sa = 0.08
loss_sa = 0.07
| mit |
churchlab/ulutil | ulutil/qPCR2melting.py | 1 | 3374 | # Copyright 2014 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
def qPCR2melting(inputfile,output_formats):
outputbasename = os.path.splitext(os.path.basename(inputfile))[0]
# Learn some things about the data:
# How many curves are there?
ip = open(inputfile,'r')
for line in ip:
if line.startswith('Step'):
# Verify the fields in the line:
fields = line.split(',')
if fields[0] != 'Step' or fields[1] != 'Cycle' or fields[2] != 'Dye' or fields[3] != 'Temp.':
raise ValueError, 'Expected line like: "Step,Cycle,Dye,Temp.,..."'
curve_labels = fields[4:-1] # (skip the above four fields and last extra comma)
break
# What step is the melting at?
for line in ip: # advance to data set characterization
if line.strip() == 'Analysis Options':
break
for line in ip:
if line.startswith("Step") and "Melting Curve" in line:
line_id = line.split()[1].strip(':')
break
ip.close()
# Create data structures
temps = []
curves = [[] for curve in curve_labels]
# Load the data
ip = open(inputfile,'r')
for line in ip: # advance to data
if line.startswith('Step'):
break
for line in ip:
if line.strip() == '':
break
if line.split(',')[0] == line_id:
temps.append(float(line.split(',')[3]))
data = map(float,line.split(',')[4:-1])
for (i,value) in enumerate(data):
curves[i].append(value)
# Make the plots
fig = plt.figure()
ax = fig.add_subplot(111)
for (label,curve) in zip(curve_labels,curves):
ax.plot(temps,curve,label=label)
ax.legend(loc=2)
ax.set_xlabel('Temperature')
ax.set_ylabel('Fluorescence (a.u.)')
for format in output_formats:
fig.savefig(outputbasename+'.melting.'+format)
if __name__ == '__main__':
import sys
import optparse
output_formats = set()
def append_format(option,opt_str,value,parser):
output_formats.add(opt_str.strip('-'))
option_parser = optparse.OptionParser()
option_parser.add_option('--png',action='callback',callback=append_format)
option_parser.add_option('--pdf',action='callback',callback=append_format)
option_parser.add_option('--eps',action='callback',callback=append_format)
(options,args) = option_parser.parse_args()
if len(args) != 1:
raise ValueError, "Must give a single file as input."
output_formats = list(output_formats)
if output_formats == []:
output_formats.append('pdf')
output_formats.append('png')
inputfile = args[0]
qPCR2melting(inputfile,output_formats)
| apache-2.0 |
ryanraaum/african-mtdna | popdata_sources/non2011/process.py | 1 | 1277 | from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from oldowan.fasta import fasta
from string import translate
import pandas as pd
import sys
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
ff = fasta('non_2011.fasta', 'r')
data = ff.readentries()
ff.close()
hids = []
seqs = []
sites = []
# four sequences are shorter than all the rest, will drop them
for e in data:
if len(e['sequence']) > 350:
hids.append(e['name'].split()[0])
seqs.append(e['sequence'])
sites.append(seq2sites(e['sequence']))
## Validate
passed_validation = True
for i in range(len(sites)):
hid = hids[i]
key = hid[:2]
region = range2region(metadata.ix[key, 'SeqRange'])
seq = translate(sites2seq(sites[i], region), None, '-')
if not seq == seqs[i]:
passed_validation = False
print i, hids[i]
if passed_validation:
counter = {}
for k in metadata.index:
counter[k] = 0
with open('processed.csv', 'w') as f:
for i in range(len(sites)):
hid = hids[i]
grp = hid[:2]
mysites = ' '.join([str(x) for x in sites[i]])
prefix = metadata.ix[grp,'NewPrefix']
counter[grp] += 1
num = str(counter[grp]).zfill(3)
newid = prefix + num
f.write('%s,%s,%s\n' % (newid, hid, mysites)) | cc0-1.0 |
harshaneelhg/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
trankmichael/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
dbcli/vcli | vcli/packages/tabulate.py | 28 | 38075 | # -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from decimal import Decimal
from platform import python_version_tuple
from wcwidth import wcswidth
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
import io
def _is_file(f):
return isinstance(f, io.IOBase)
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.4"
MIN_PADDING = 2
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is suppposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebewteenrows ---
# ... (more datarows) ...
# --- linebewteenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
def escape_char(c):
return LATEX_ESCAPE_RULES.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is _int_type or type(string) is _long_type or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
if isinstance(string, (bool, Decimal,)):
return _text_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
lwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return ' ' * lwidth + s
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
rwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return s + ' ' * rwidth
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
xwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
lwidth = xwidth // 2
rwidth = 0 if xwidth <= 0 else lwidth + xwidth % 2
return ' ' * lwidth + s + ' ' * rwidth
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return wcswidth(_strip_invisible(s))
else:
return wcswidth(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
maxwidth = max(max(map(width_fn, strings)), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval=""):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
else: # it's a usual an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
#Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type,headers))
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
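# Illustrative sketch (added for clarity, not part of the original module):
# how the normalisation above treats a plain dict of columns when the keys are
# requested as headers.  The literal values are arbitrary; column order follows
# the dict's iteration order.
def _example_normalize_dict_of_columns():
    rows, headers = _normalize_tabular_data(
        {"name": ["spam", "eggs"], "qty": [1, 42]}, headers="keys")
    # e.g. headers == ['name', 'qty'] and rows == [['spam', 1], ['eggs', 42]]
    return rows, headers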
def tabulate(tabular_data, headers=[], tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
\end{tabular}
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval) for v in c]
for c,ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
return "\n".join(lines)
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data. See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1f:s:",
["help", "header", "format", "separator"])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
tablefmt = "simple"
sep = r"\s+"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt, sep=sep)
else:
with open(f) as fobj:
                _pprint_file(fobj, headers=headers, tablefmt=tablefmt, sep=sep)
def _pprint_file(fobject, headers, tablefmt, sep):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows]
print(tabulate(table, headers, tablefmt))
if __name__ == "__main__":
_main()
| bsd-3-clause |
chrisburr/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
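    # -1 is scikit-learn's semi-supervised convention for "unlabeled"; only the
    # first n_labeled_points keep their true labels in y_train.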
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
    delete_indices = np.array([], dtype=int)
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
ImageMarkup/isbi-challenge-scoring | isic_challenge_scoring/confusion.py | 1 | 1557 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def createBinaryConfusionMatrix(
truthBinaryValues: np.ndarray, predictionBinaryValues: np.ndarray,
name: str = None) -> pd.Series:
# This implementation is:
# ~30x faster than sklearn.metrics.confusion_matrix
# ~25x faster than sklearn.metrics.confusion_matrix(labels=[False, True])
# ~6x faster than pandas.crosstab
truthBinaryValues = truthBinaryValues.ravel()
predictionBinaryValues = predictionBinaryValues.ravel()
truthBinaryNegativeValues = 1 - truthBinaryValues
testBinaryNegativeValues = 1 - predictionBinaryValues
truePositive = np.sum(np.logical_and(truthBinaryValues,
predictionBinaryValues))
trueNegative = np.sum(np.logical_and(truthBinaryNegativeValues,
testBinaryNegativeValues))
falsePositive = np.sum(np.logical_and(truthBinaryNegativeValues,
predictionBinaryValues))
falseNegative = np.sum(np.logical_and(truthBinaryValues,
testBinaryNegativeValues))
# Storing the matrix as a Series instead of a DataFrame makes it easier to reference elements
# and aggregate multiple matrices
cm = pd.Series({
'TP': truePositive,
'TN': trueNegative,
'FP': falsePositive,
'FN': falseNegative,
}, name=name)
return cm
def normalizeConfusionMatrix(cm: pd.Series) -> pd.Series:
return cm / cm.sum()
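def _exampleBinaryMetrics(cm: pd.Series) -> pd.Series:
    # Illustrative sketch (not part of the original module): common scalar
    # metrics derived from the TP/TN/FP/FN counts produced by
    # createBinaryConfusionMatrix above.
    return pd.Series({
        'sensitivity': cm['TP'] / (cm['TP'] + cm['FN']),
        'specificity': cm['TN'] / (cm['TN'] + cm['FP']),
        'accuracy': (cm['TP'] + cm['TN']) / cm.sum(),
    })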
| apache-2.0 |
untom/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
           + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
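# Both generators produce data with a monotone trend: "perturbed_logarithm" is
# a scaled log curve plus bounded integer noise, and "logistic" draws Bernoulli
# outcomes whose success probability increases with the sorted input, so each
# is a reasonable target for isotonic regression.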
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
khkaminska/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
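def _example_closed_form(K, y, alpha=1.0):
    # Illustrative sketch (not part of scikit-learn's API): for a single target
    # with no sample weights, the dual coefficients of kernel ridge regression
    # solve (K + alpha * I) dual_coef = y. The call to _solve_cholesky_kernel
    # in fit() computes the same quantity more robustly (handling sample
    # weights, multiple targets and near-singular kernels).
    n_samples = K.shape[0]
    return np.linalg.solve(K + alpha * np.eye(n_samples), y)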
| bsd-3-clause |
ofgulban/scikit-image | skimage/viewer/canvastools/painttool.py | 13 | 7014 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
LABELS_CMAP = mcolors.ListedColormap(['white', 'red', 'dodgerblue', 'gold',
'greenyellow', 'blueviolet'])
from ...viewer.canvastools.base import CanvasToolBase
__all__ = ['PaintTool']
class PaintTool(CanvasToolBase):
"""Widget for painting on top of a plot.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
overlay_shape : shape tuple
2D shape tuple used to initialize overlay image.
alpha : float (between [0, 1])
Opacity of overlay
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
rect_props : dict
Properties for :class:`matplotlib.patches.Rectangle`. This class
redefines defaults in :class:`matplotlib.widgets.RectangleSelector`.
Attributes
----------
overlay : array
Overlay of painted labels displayed on top of image.
label : int
Current paint color.
Examples
----------
>>> from skimage.data import camera
>>> import matplotlib.pyplot as plt
>>> from skimage.viewer.canvastools import PaintTool
>>> import numpy as np
>>> img = camera() #doctest: +SKIP
>>> ax = plt.subplot(111) #doctest: +SKIP
>>> plt.imshow(img, cmap=plt.cm.gray) #doctest: +SKIP
>>> p = PaintTool(ax,np.shape(img[:-1]),10,0.2) #doctest: +SKIP
>>> plt.show() #doctest: +SKIP
>>> mask = p.overlay #doctest: +SKIP
>>> plt.imshow(mask,cmap=plt.cm.gray) #doctest: +SKIP
>>> plt.show() #doctest: +SKIP
"""
def __init__(self, manager, overlay_shape, radius=5, alpha=0.3,
on_move=None, on_release=None, on_enter=None,
rect_props=None):
super(PaintTool, self).__init__(manager, on_move=on_move,
on_enter=on_enter,
on_release=on_release)
props = dict(edgecolor='r', facecolor='0.7', alpha=0.5, animated=True)
props.update(rect_props if rect_props is not None else {})
self.alpha = alpha
self.cmap = LABELS_CMAP
self._overlay_plot = None
self.shape = overlay_shape
self._cursor = plt.Rectangle((0, 0), 0, 0, **props)
self._cursor.set_visible(False)
self.ax.add_patch(self._cursor)
# `label` and `radius` can only be set after initializing `_cursor`
self.label = 1
self.radius = radius
# Note that the order is important: Redraw cursor *after* overlay
self.artists = [self._overlay_plot, self._cursor]
self.manager.add_tool(self)
@property
def label(self):
return self._label
@label.setter
def label(self, value):
if value >= self.cmap.N:
            raise ValueError('Maximum label value = %s' % (self.cmap.N - 1))
self._label = value
self._cursor.set_edgecolor(self.cmap(value))
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, r):
self._radius = r
self._width = 2 * r + 1
self._cursor.set_width(self._width)
self._cursor.set_height(self._width)
self.window = CenteredWindow(r, self._shape)
@property
def overlay(self):
return self._overlay
@overlay.setter
def overlay(self, image):
self._overlay = image
if image is None:
self.ax.images.remove(self._overlay_plot)
self._overlay_plot = None
elif self._overlay_plot is None:
props = dict(cmap=self.cmap, alpha=self.alpha,
norm=mcolors.NoNorm(), animated=True)
self._overlay_plot = self.ax.imshow(image, **props)
else:
self._overlay_plot.set_data(image)
self.redraw()
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, shape):
self._shape = shape
        if self._overlay_plot is not None:
self._overlay_plot.set_extent((-0.5, shape[1] + 0.5,
shape[0] + 0.5, -0.5))
self.radius = self._radius
self.overlay = np.zeros(shape, dtype='uint8')
def on_key_press(self, event):
if event.key == 'enter':
self.callback_on_enter(self.geometry)
self.redraw()
def on_mouse_press(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return
self.update_cursor(event.xdata, event.ydata)
self.update_overlay(event.xdata, event.ydata)
def on_mouse_release(self, event):
if event.button != 1:
return
self.callback_on_release(self.geometry)
def on_move(self, event):
if not self.ax.in_axes(event):
self._cursor.set_visible(False)
self.redraw() # make sure cursor is not visible
return
self._cursor.set_visible(True)
self.update_cursor(event.xdata, event.ydata)
if event.button != 1:
self.redraw() # update cursor position
return
self.update_overlay(event.xdata, event.ydata)
self.callback_on_move(self.geometry)
def update_overlay(self, x, y):
overlay = self.overlay
overlay[self.window.at(y, x)] = self.label
# Note that overlay calls `redraw`
self.overlay = overlay
def update_cursor(self, x, y):
x = x - self.radius - 1
y = y - self.radius - 1
self._cursor.set_xy((x, y))
@property
def geometry(self):
return self.overlay
class CenteredWindow(object):
"""Window that create slices numpy arrays over 2D windows.
Examples
--------
>>> a = np.arange(16).reshape(4, 4)
>>> w = CenteredWindow(1, a.shape)
>>> a[w.at(1, 1)]
array([[ 0, 1, 2],
[ 4, 5, 6],
[ 8, 9, 10]])
>>> a[w.at(0, 0)]
array([[0, 1],
[4, 5]])
>>> a[w.at(4, 3)]
array([[14, 15]])
"""
def __init__(self, radius, array_shape):
self.radius = radius
self.array_shape = array_shape
def at(self, row, col):
h, w = self.array_shape
r = self.radius
xmin = max(0, col - r)
xmax = min(w, col + r + 1)
ymin = max(0, row - r)
ymax = min(h, row + r + 1)
return [slice(ymin, ymax), slice(xmin, xmax)]
if __name__ == '__main__': # pragma: no cover
np.testing.rundocs()
from ... import data
from ...viewer import ImageViewer
image = data.camera()
viewer = ImageViewer(image)
paint_tool = PaintTool(viewer, image.shape)
viewer.show()
| bsd-3-clause |
Nyker510/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
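def _example_consensus_score():
    # Illustrative sketch (not part of scikit-learn's API): two identical sets,
    # each containing a single bicluster encoded as boolean row/column
    # indicator vectors, match perfectly, so the score is 1.0.
    rows = np.array([[True, True, False, False]])
    cols = np.array([[True, False, False, False]])
    return consensus_score((rows, cols), (rows, cols))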
| bsd-3-clause |
thast/EOSC513 | DC/Coherency/coh_WV_DB3_withoutW/coh_DB3_withoutW.py | 1 | 9679 | from SimPEG import Mesh, Regularization, Maps, Utils, EM
from SimPEG.EM.Static import DC
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import copy
import pandas as pd
from scipy.sparse import csr_matrix, spdiags, dia_matrix,diags
from scipy.sparse.linalg import spsolve
from scipy.stats import norm,multivariate_normal
import sys
path ="../pymatsolver/"
path = "../../../Documents/pymatsolver/"
sys.path.append(path)
from pymatsolver import PardisoSolver
from scipy.interpolate import LinearNDInterpolator, interp1d
from sklearn.mixture import GaussianMixture
from SimPEG import DataMisfit, Regularization, Optimization, InvProblem, Directives, Inversion
#2D model
csx, csy, csz = 0.25,0.25,0.25
# Number of core cells in each direction
ncx, ncz = 2**7-24,2**7-12
# Number of padding cells to add in each direction
npad = 12
# Vectors of cell lengths in each direction
hx = [(csx,npad, -1.5),(csx,ncx),(csx,npad, 1.5)]
hz= [(csz,npad,-1.5),(csz,ncz)]
# Create mesh
mesh = Mesh.TensorMesh([hx, hz],x0="CN")
# Map mesh coordinates from local to UTM coordiantes
#mesh.x0[2] = mesh.x0[2]-mesh.vectorCCz[-npad-1]
mesh.x0[1] = mesh.x0[1]+csz/2.
#mesh.x0[0] = mesh.x0[0]+csx/2.
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#plt.gca().set_xlim([-20,20])
#plt.gca().set_ylim([-15,0])
#mesh.plotGrid()
#plt.gca().set_aspect('equal')
#plt.show()
print "Mesh Size: ", mesh.nC
#Model Creation
lnsig_air = 1e-8;
x0,z0, r0 = -6., -4., 3.
x1,z1, r1 = 6., -4., 3.
ln_sigback = -5.
ln_sigc = -3.
ln_sigr = -7.
noisemean = 0.
noisevar = 0.0
overburden_extent = 0.
ln_over = -4.
#m = (lnsig_background)*np.ones(mesh.nC);
#mu =np.ones(mesh.nC);
mtrue = ln_sigback*np.ones(mesh.nC) + norm(noisemean,noisevar).rvs(mesh.nC)
overb = (mesh.gridCC[:,1] >-overburden_extent) & (mesh.gridCC[:,1]<=0)
mtrue[overb] = ln_over*np.ones_like(mtrue[overb])+ norm(noisemean,noisevar).rvs(np.prod((mtrue[overb]).shape))
csph = (np.sqrt((mesh.gridCC[:,1]-z0)**2.+(mesh.gridCC[:,0]-x0)**2.))< r0
mtrue[csph] = ln_sigc*np.ones_like(mtrue[csph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[csph]).shape))
#Define the sphere limit
rsph = (np.sqrt((mesh.gridCC[:,1]-z1)**2.+(mesh.gridCC[:,0]-x1)**2.))< r1
mtrue[rsph] = ln_sigr*np.ones_like(mtrue[rsph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[rsph]).shape))
mtrue = Utils.mkvc(mtrue);
mesh.plotGrid()
plt.gca().set_xlim([-10,10])
plt.gca().set_ylim([-10,0])
xyzlim = np.r_[[[-10.,10.],[-10.,1.]]]
actind, meshCore = Utils.meshutils.ExtractCoreMesh(xyzlim,mesh)
plt.hist(mtrue[actind],bins =50,normed=True);
fig0 = plt.figure()
ax0 = fig0.add_subplot(111)
mm = meshCore.plotImage(mtrue[actind],ax = ax0)
plt.colorbar(mm[0])
ax0.set_aspect("equal")
#plt.show()
def getCylinderPoints(xc,zc,r):
xLocOrig1 = np.arange(-r,r+r/10.,r/10.)
xLocOrig2 = np.arange(r,-r-r/10.,-r/10.)
# Top half of cylinder
zLoc1 = np.sqrt(-xLocOrig1**2.+r**2.)+zc
# Bottom half of cylinder
zLoc2 = -np.sqrt(-xLocOrig2**2.+r**2.)+zc
# Shift from x = 0 to xc
xLoc1 = xLocOrig1 + xc*np.ones_like(xLocOrig1)
xLoc2 = xLocOrig2 + xc*np.ones_like(xLocOrig2)
topHalf = np.vstack([xLoc1,zLoc1]).T
topHalf = topHalf[0:-1,:]
bottomHalf = np.vstack([xLoc2,zLoc2]).T
bottomHalf = bottomHalf[0:-1,:]
cylinderPoints = np.vstack([topHalf,bottomHalf])
cylinderPoints = np.vstack([cylinderPoints,topHalf[0,:]])
return cylinderPoints
cylinderPoints0 = getCylinderPoints(x0,z1,r0)
cylinderPoints1 = getCylinderPoints(x1,z1,r1)
#Gradient array 1 2D
srclist = []
nSrc = 23
lines = 1
ylines = np.r_[0.]
xlines = np.r_[0.]
z = 0.
#xline
for k in range(lines):
for i in range(nSrc):
if i<=11:
locA = np.r_[-14.+1., z]
locB = np.r_[-8.+2.*i-1., z]
#M = np.c_[np.arange(-12.,-12+2*(i+1),2),np.ones(i+1)*z]
#N = np.c_[np.arange(-10.,-10+2*(i+1),2),np.ones(i+1)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
#print -locA,-locB,"\n",[-M,-N],"\n"
else:
locA = np.r_[-14.+2*(i-11)+1., z]
locB = np.r_[14.-1.,z]
#M = np.c_[np.arange(locA[0]+1.,12.,2),np.ones(nSrc-i)*z]
#N = np.c_[np.arange(locA[0]+3.,14.,2),np.ones(nSrc-i)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print "line2",locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
mapping = Maps.ExpMap(mesh)
survey = DC.Survey(srclist)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
problem.Solver = PardisoSolver
dmis = DataMisfit.l2_DataMisfit(survey)
survey.dpred(mtrue)
survey.makeSyntheticData(mtrue,std=0.05,force=True)
survey.eps = 1e-5*np.linalg.norm(survey.dobs)
print '# of data: ', survey.dobs.shape
from SimPEG.Maps import IdentityMap
import pywt
class WaveletMap(IdentityMap):
def __init__(self, mesh=None, nP=None, **kwargs):
super(WaveletMap, self).__init__(mesh=mesh, nP=nP, **kwargs)
def _transform(self, m, wv = 'db3'):
coeff_wv = pywt.wavedecn(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_wv = pywt.coeffs_to_array(coeff_wv)
return Utils.mkvc(array_wv[0])
def deriv(self, m, v=None, wv = 'db3'):
if v is not None:
coeff_wv = pywt.wavedecn(v.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_wv = pywt.coeffs_to_array(coeff_wv)
return Utils.mkvc(array_wv[0])
else:
print "not implemented"
def inverse(self, m, wv = 'db3'):
msyn = np.zeros(mesh.nC)
coeff_wv = pywt.wavedecn(msyn.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_wv = pywt.coeffs_to_array(coeff_wv)
coeff_back = pywt.array_to_coeffs(m.reshape(array_wv[0].shape, order = 'F'),array_wv[1])
coeff_m = pywt.waverecn(coeff_back,wv, mode = 'per')
return Utils.mkvc(coeff_m)
class iWaveletMap(IdentityMap):
def __init__(self, mesh, nP=None, **kwargs):
super(iWaveletMap, self).__init__(mesh=mesh, nP=nP, **kwargs)
def _transform(self, m, wv = 'db3'):
msyn = np.zeros(mesh.nC)
coeff_map = pywt.wavedecn(msyn.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_map = pywt.coeffs_to_array(coeff_map)
coeff_map = pywt.array_to_coeffs(m.reshape(array_map[0].shape,order= 'F'),array_map[1])
coeff_back_map = pywt.waverecn(coeff_map,wv, mode = 'per')
return Utils.mkvc(coeff_back_map)
def deriv(self, m, v=None, wv = 'db3'):
if v is not None:
coeff_wv = pywt.wavedecn(v.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_wv = pywt.coeffs_to_array(coeff_wv)
coeff_back = pywt.array_to_coeffs(v,array_wv[1])
coeff_m = pywt.waverecn(coeff_back,wv, mode = 'per')
return Utils.mkvc(coeff_m)
else:
print "not implemented"
def inverse(self, m, wv = 'db3'):
coeff_wv = pywt.wavedecn(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_wv = pywt.coeffs_to_array(coeff_wv)
return Utils.mkvc(array_wv[0])
wavmap = WaveletMap(mesh)
iwavmap = iWaveletMap(mesh)
J = lambda v: problem.Jvec(mtrue,v)
Jt = lambda v: problem.Jtvec(mtrue,v)
x = np.zeros_like(mtrue)
v = np.zeros(problem.survey.nD)
print 'v shape: ',v.shape
indx = np.random.permutation(len(x))
indv = np.random.permutation(len(v))
coeff = 50
muwav = np.zeros([coeff, coeff])
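# The nested loop below estimates the mutual coherency between the sensitivity
# matrix and the DB3 wavelet basis: each outer step builds one wavelet basis
# function (a single detail coefficient set to 1 and inverse-transformed), each
# inner step builds one column of J^T (Jtvec applied to a unit data vector),
# and the normalized inner product of the two is stored in muwav[i, j].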
for i in range(coeff):
print 'iteration: ', i
coeffs = pywt.wavedec2(np.zeros((mesh.nCx,mesh.nCy)),'db3')
indx0 = np.random.randint(1,len(coeffs))
indx1 = np.random.randint(0,3)
indx2 = np.random.randint(0,(coeffs[indx0][indx1]).shape[0])
indx3 = np.random.randint(0,(coeffs[indx0][indx1]).shape[1])
coeffs[indx0][indx1][indx2][indx3] = 1.
for j in range(coeff):
v = np.zeros(problem.survey.nD)
v[indv[j]] = 1.
v = Jt(v)
v = v/np.linalg.norm(v)
x = pywt.waverec2(coeffs,'db3')
x = x.reshape(mesh.nC,order = 'F')
x = x/np.linalg.norm(x)
        muwav[i, j] = x.dot(v)
np.save('./mu.npy',muwav)
np.savez('./mu.npz',muwav)
fig1 = plt.figure(figsize=(10,8))
ax1 = fig1.add_subplot(111)
mm = ax1.matshow(np.log10(np.abs(muwav)), cmap="jet")
mm.set_clim(vmin=-8., vmax=0.)
cb = plt.colorbar(mm)
ax1.set_aspect("equal")
cb.set_label("Log10 |< * , * >|", fontsize=20)
cb.ax.tick_params(labelsize=20)
ax1.set_xlabel("columns of $J^T$",fontsize = 20)
ax1.set_ylabel("columns of $S^H$",fontsize =20)
ax1.set_title("Coherency of $J^T$ with $S^H$, $S$ the Wavelets Transform DB3",fontsize = 22)
ax1.text(0.,55.,"Minimum = %g"%(np.abs(muwav).min()),fontsize =20)
ax1.text(0.,57.,"Maximum = %.2f"%(np.abs(muwav).max()),fontsize =20)
ax1.tick_params(labelsize = 16)
fig1.savefig('./coh_DB3_withoutW.png')
plt.show() | mit |
alvarofierroclavero/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
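# l1 and l2 trace the boundaries of the unit L1 (diamond) and L2 (circle)
# penalty balls; el(xs, z) traces an elastic-net contour mixing the two, solved
# for w_1 as a function of w_0, which is why z = 0.5 (where the 2 - 4*z
# denominator vanishes) must be avoided below.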
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause a division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/utils/deprecation.py | 2 | 3633 | import warnings
import functools
__all__ = ["deprecated"]
class deprecated:
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated()
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra : string
to be added to the deprecation messages
"""
# Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
self.extra = extra
def __call__(self, obj):
"""Call method
Parameters
----------
obj : object
"""
if isinstance(obj, type):
return self._decorate_class(obj)
elif isinstance(obj, property):
# Note that this is only triggered properly if the `property`
# decorator comes before the `deprecated` decorator, like so:
#
# @deprecated(msg)
# @property
# def deprecated_attribute_(self):
# ...
return self._decorate_property(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
@functools.wraps(fun)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__doc__ = self._update_doc(wrapped.__doc__)
# Add a reference to the wrapped function so that we can introspect
# on function arguments in Python 2 (already works in Python 3)
wrapped.__wrapped__ = fun
return wrapped
def _decorate_property(self, prop):
msg = self.extra
@property
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return prop.fget(*args, **kwargs)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
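# Illustrative sketch (not part of scikit-learn's API): the property case
# handled by _decorate_property above, where `@deprecated` must come before
# `@property`, as noted in __call__.
class _ExampleWithDeprecatedProperty:
    @deprecated("use `new_attribute_` instead")
    @property
    def old_attribute_(self):
        return 42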
def _is_deprecated(func):
"""Helper to check if func is wraped by our deprecated decorator"""
closures = getattr(func, '__closure__', [])
if closures is None:
closures = []
is_deprecated = ('deprecated' in ''.join([c.cell_contents
for c in closures
if isinstance(c.cell_contents, str)]))
return is_deprecated
| bsd-3-clause |
artemyk/graphy | docs/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException, e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
| bsd-2-clause |
numenta/htmresearch | projects/learning_location/dordek_cs_sim.py | 4 | 3679 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Use Hebbian learning over place cell inputs fed through
center-surround cells to learn grid cells. The center-surround output
looks very similar to the input so it doesn't have much impact.
"""
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import NMF as PCA
from nupic.algorithms.spatial_pooler import SpatialPooler
def getNewLocation(x, y, width, viewDistance, wrap):
options = []
if x > viewDistance or wrap:
options.append(((x - 1) % width, y))
if x < width - viewDistance - 1 or wrap:
options.append(((x + 1) % width, y))
if y > viewDistance or wrap:
options.append((x, (y - 1) % width))
if y < width - viewDistance - 1 or wrap:
options.append((x, (y + 1) % width))
return options[int(random.random() * len(options))]
def getActive(world, x, y):
active = set()
for i in xrange(x - 2, x + 2 + 1):
for j in xrange(y - 2, y + 2 + 1):
active.add(world[i % 25, j % 25])
return active
def generateCenterSurroundFields():
fields = []
for i in xrange(25 - 5 + 1):
for j in xrange(25 - 5 + 1):
center = np.zeros((25, 25), dtype=np.bool)
center[i:i+5, j:j+5] = 1
sr = 3
surround = np.zeros((25, 25), dtype=np.bool)
surround[max(0, i-sr):i+5+sr+1, max(0, j-sr):j+5+sr+1] = 1
surround[i:i+5, j:j+5] = 0
fields.append((center.flatten(), surround.flatten()))
return fields
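# Sliding a 5x5 center over the 25x25 world gives (25 - 5 + 1)**2 = 441 = 21*21
# fields, which is why spInputSize in main() is 21*21.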
def processCenterSurround(fields, activeInput):
return np.array(
[activeInput[c].sum() > activeInput[s].sum()
for c, s in fields], dtype=np.uint32)
def main():
x = 10
y = 10
steps = 10000
world = np.array([i for i in xrange(625)])
world.resize((25, 25))
spInputSize = 21*21
sp = SpatialPooler(
inputDimensions=(spInputSize,),
columnDimensions=(25,),
potentialRadius=spInputSize,
numActiveColumnsPerInhArea=1,
synPermActiveInc=0.1,
synPermInactiveDec=0.5,
boostStrength=1.0,
)
csFields = generateCenterSurroundFields()
output = np.zeros((25,), dtype=np.uint32)
for _ in xrange(steps):
active = getActive(world, x, y)
assert len(active) == 25, "{}, {}: {}".format(x, y, active)
activeInput = np.zeros((625,), dtype=np.uint32)
for v in active:
activeInput[v] = 1
centerSurround = processCenterSurround(csFields, activeInput)
print centerSurround
sp.compute(centerSurround, True, output)
x, y = getNewLocation(x, y, 25, 2, False)
for i in xrange(25):
permanence = np.zeros((spInputSize,))
sp.getPermanence(i, permanence)
plt.imshow(permanence.reshape((21, 21)), cmap="hot", interpolation="nearest")
plt.show()
if __name__ == "__main__":
main()
| agpl-3.0 |
thunderhoser/GewitterGefahr | gewittergefahr/gg_utils/feature_transformation_test.py | 1 | 9703 | """Unit tests for feature_transformation.py."""
import copy
import unittest
import numpy
import pandas
from gewittergefahr.gg_utils import feature_transformation as feature_trans
TOLERANCE = 1e-6
FEATURE_NAMES = ['a', 'b', 'c']
FEATURE_MATRIX = numpy.array([
[1., 0., 10.],
[2., 1., 9.],
[3., 2., numpy.nan],
[4., numpy.nan, 8.],
[5., numpy.nan, -3.]
])
FEATURE_MATRIX_TOO_MANY_NAN = numpy.array([
[1., 0., 10.],
[2., numpy.nan, 10.],
[3., numpy.nan, numpy.nan],
[4., numpy.nan, 5.],
[5., numpy.nan, 0.]
])
THIS_FEATURE_DICT = {}
for j in range(len(FEATURE_NAMES)):
THIS_FEATURE_DICT.update({FEATURE_NAMES[j]: FEATURE_MATRIX[:, j]})
FEATURE_TABLE = pandas.DataFrame.from_dict(THIS_FEATURE_DICT)
THIS_FEATURE_DICT = {}
for j in range(len(FEATURE_NAMES)):
THIS_FEATURE_DICT.update({
FEATURE_NAMES[j]: FEATURE_MATRIX_TOO_MANY_NAN[:, j]
})
FEATURE_TABLE_TOO_MANY_NAN = pandas.DataFrame.from_dict(THIS_FEATURE_DICT)
# The following constants are used to test replace_missing_values.
FEATURE_MEANS = numpy.array([3., 1., 6.])
REPLACEMENT_DICT_MEAN = {
feature_trans.FEATURE_NAMES_KEY: FEATURE_NAMES,
feature_trans.ORIGINAL_MEANS_KEY: FEATURE_MEANS
}
FEATURE_MATRIX_MISSING_TO_MEAN = numpy.array([
[1., 0., 10.],
[2., 1., 9.],
[3., 2., 6.],
[4., 1., 8.],
[5., 1., -3.]
])
FEATURE_MEDIANS = numpy.array([3., 1., 8.5])
REPLACEMENT_DICT_MEDIAN = {
feature_trans.FEATURE_NAMES_KEY: FEATURE_NAMES,
feature_trans.ORIGINAL_MEDIANS_KEY: FEATURE_MEDIANS
}
FEATURE_MATRIX_MISSING_TO_MEDIAN = numpy.array([
[1., 0., 10.],
[2., 1., 9.],
[3., 2., 8.5],
[4., 1., 8.],
[5., 1., -3.]
])
# The following constants are used to test standardize_features.
FEATURE_STANDARD_DEVIATIONS = numpy.sqrt(numpy.array([2.5, 1., 110. / 3]))
STANDARDIZATION_DICT = {
feature_trans.FEATURE_NAMES_KEY: FEATURE_NAMES,
feature_trans.ORIGINAL_MEANS_KEY: FEATURE_MEANS,
feature_trans.ORIGINAL_STDEVS_KEY: FEATURE_STANDARD_DEVIATIONS
}
# The following constants are used to test
# _reorder_standardization_or_replacement_dict.
PERMUTED_FEATURE_NAMES = ['b', 'c', 'a']
PERMUTED_STANDARDIZATION_DICT = {
feature_trans.FEATURE_NAMES_KEY: PERMUTED_FEATURE_NAMES,
feature_trans.ORIGINAL_MEANS_KEY: numpy.array([1., 6., 3.]),
feature_trans.ORIGINAL_STDEVS_KEY:
numpy.sqrt(numpy.array([1., 110. / 3, 2.5]))
}
# The following constants are used to test filter_svd_by_explained_variance.
THIS_PC_MATRIX = numpy.array([
[0., 0.1, 0.2, 0.3, 0.4],
[0.5, 0.3, 0.2, 0.22, 0.11],
[1.2, -0.6, -0.1, 0.17, 0.4],
[-1.9, -1.2, 0.2, 0.1, 0.1],
[-2.6, -1., -0.3, 0.01, 0.005]
])
THIS_EIGENVALUE_MATRIX = numpy.array([
[50., 0., 0.],
[0., 2.7, 0.],
[0., 0.00123, 0.]
])
THIS_EOF_MATRIX = numpy.array([
[0.72, 0.87, -0.03],
[-0.43, -0.14, 0.025],
[0., 0.17, 1.03]
])
SVD_DICTIONARY = {
feature_trans.PC_MATRIX_KEY: THIS_PC_MATRIX,
feature_trans.EIGENVALUE_MATRIX_KEY: THIS_EIGENVALUE_MATRIX,
feature_trans.EOF_MATRIX_KEY: THIS_EOF_MATRIX
}
SVD_DICTIONARY_90PCT_VARIANCE = {
feature_trans.PC_MATRIX_KEY: THIS_PC_MATRIX[:, :1],
feature_trans.EIGENVALUE_MATRIX_KEY: THIS_EIGENVALUE_MATRIX[:1, :1],
feature_trans.EOF_MATRIX_KEY: THIS_EOF_MATRIX[:, :1]
}
SVD_DICTIONARY_95PCT_VARIANCE = {
feature_trans.PC_MATRIX_KEY: THIS_PC_MATRIX[:, :2],
feature_trans.EIGENVALUE_MATRIX_KEY: THIS_EIGENVALUE_MATRIX[:2, :2],
feature_trans.EOF_MATRIX_KEY: THIS_EOF_MATRIX[:, :2]
}
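# Assuming explained variance is taken proportional to the eigenvalues above,
# the leading eigenvalue (50) alone accounts for roughly 94.9% of the ~52.7
# total, so the 90%-variance filter keeps one component while the 95% filter
# needs the first two.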
class FeatureTransformationTests(unittest.TestCase):
"""Each method is a unit test for feature_transformation.py."""
def test_reorder_standardization_or_replacement_dict(self):
"""Ensures crctness of _reorder_standardization_or_replacement_dict."""
this_standardization_dict = copy.deepcopy(STANDARDIZATION_DICT)
this_standardization_dict = (
feature_trans._reorder_standardization_or_replacement_dict(
this_standardization_dict, PERMUTED_FEATURE_NAMES)
)
self.assertTrue(
set(this_standardization_dict.keys()) ==
set(PERMUTED_STANDARDIZATION_DICT.keys())
)
self.assertTrue(numpy.allclose(
this_standardization_dict[feature_trans.ORIGINAL_MEANS_KEY],
PERMUTED_STANDARDIZATION_DICT[feature_trans.ORIGINAL_MEANS_KEY],
atol=TOLERANCE
))
self.assertTrue(numpy.allclose(
this_standardization_dict[feature_trans.ORIGINAL_STDEVS_KEY],
PERMUTED_STANDARDIZATION_DICT[feature_trans.ORIGINAL_STDEVS_KEY],
atol=TOLERANCE
))
def test_reorder_standardization_or_replacement_dict_mismatch(self):
"""Ensures _reorder_standardization_or_replacement_dict throws error*.
* Because standardization_dict does not contain all desired features.
"""
these_feature_names = FEATURE_NAMES + ['foo']
with self.assertRaises(ValueError):
feature_trans._reorder_standardization_or_replacement_dict(
STANDARDIZATION_DICT, these_feature_names)
def test_replace_missing_values_with_mean(self):
"""Ensures correct output from replace_missing_values.
In this case, missing values of feature F are replaced with the mean
F-value.
"""
this_feature_table, this_replacement_dict = (
feature_trans.replace_missing_values(
FEATURE_TABLE,
replacement_method=feature_trans.MEAN_VALUE_REPLACEMENT_METHOD)
)
self.assertTrue(numpy.allclose(
FEATURE_MATRIX_MISSING_TO_MEAN, this_feature_table.to_numpy(),
atol=TOLERANCE
))
self.assertTrue(
set(this_replacement_dict.keys()) ==
set(REPLACEMENT_DICT_MEAN.keys())
)
self.assertTrue(numpy.allclose(
this_replacement_dict[feature_trans.ORIGINAL_MEANS_KEY],
REPLACEMENT_DICT_MEAN[feature_trans.ORIGINAL_MEANS_KEY],
atol=TOLERANCE
))
def test_replace_missing_values_with_median(self):
"""Ensures correct output from replace_missing_values.
In this case, missing values of feature F are replaced with the median
F-value.
"""
this_feature_table, this_replacement_dict = (
feature_trans.replace_missing_values(
FEATURE_TABLE, replacement_method=
feature_trans.MEDIAN_VALUE_REPLACEMENT_METHOD)
)
self.assertTrue(numpy.allclose(
FEATURE_MATRIX_MISSING_TO_MEDIAN, this_feature_table.to_numpy(),
atol=TOLERANCE
))
self.assertTrue(
set(this_replacement_dict.keys()) ==
set(REPLACEMENT_DICT_MEDIAN.keys())
)
self.assertTrue(numpy.allclose(
this_replacement_dict[feature_trans.ORIGINAL_MEDIANS_KEY],
REPLACEMENT_DICT_MEDIAN[feature_trans.ORIGINAL_MEDIANS_KEY],
atol=TOLERANCE
))
def test_standardize_features(self):
"""Ensures correct output from standardize_features."""
_, this_standardization_dict = feature_trans.standardize_features(
FEATURE_TABLE)
self.assertTrue(
set(this_standardization_dict.keys()) ==
set(STANDARDIZATION_DICT.keys())
)
self.assertTrue(numpy.allclose(
this_standardization_dict[feature_trans.ORIGINAL_MEANS_KEY],
STANDARDIZATION_DICT[feature_trans.ORIGINAL_MEANS_KEY],
atol=TOLERANCE
))
self.assertTrue(numpy.allclose(
this_standardization_dict[feature_trans.ORIGINAL_STDEVS_KEY],
STANDARDIZATION_DICT[feature_trans.ORIGINAL_STDEVS_KEY],
atol=TOLERANCE
))
def test_standardize_features_too_many_nans(self):
"""Ensures that standardize_features throws too-many-NaN error."""
with self.assertRaises(ValueError):
feature_trans.standardize_features(FEATURE_TABLE_TOO_MANY_NAN)
def test_perform_svd_no_crash(self):
"""Ensures that perform_svd does not crash.
I do not test for expected values, because these would be very tedious
to calculate.
"""
feature_trans.perform_svd(FEATURE_TABLE)
def test_filter_svd_by_explained_variance_90pct(self):
"""Ensures correct output from filter_svd_by_explained_variance.
In this case, fraction of variance to keep is 90%.
"""
this_svd_dictionary = copy.deepcopy(SVD_DICTIONARY)
this_svd_dictionary = feature_trans.filter_svd_by_explained_variance(
this_svd_dictionary, fraction_of_variance_to_keep=0.9)
for this_key in list(this_svd_dictionary.keys()):
self.assertTrue(numpy.allclose(
this_svd_dictionary[this_key],
SVD_DICTIONARY_90PCT_VARIANCE[this_key], atol=TOLERANCE
))
def test_filter_svd_by_explained_variance_95pct(self):
"""Ensures correct output from filter_svd_by_explained_variance.
In this case, fraction of variance to keep is 95%.
"""
this_svd_dictionary = copy.deepcopy(SVD_DICTIONARY)
this_svd_dictionary = feature_trans.filter_svd_by_explained_variance(
this_svd_dictionary, fraction_of_variance_to_keep=0.95)
for this_key in list(this_svd_dictionary.keys()):
self.assertTrue(numpy.allclose(
this_svd_dictionary[this_key],
SVD_DICTIONARY_95PCT_VARIANCE[this_key], atol=TOLERANCE
))
if __name__ == '__main__':
unittest.main()
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/frame/test_operators.py | 7 | 43594 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import operator
import pytest
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.core.common as com
import pandas.io.formats.printing as printing
import pandas as pd
from pandas.util.testing import (assert_numpy_array_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import (TestData, _check_mixed_float,
_check_mixed_int)
class TestDataFrameOperators(TestData):
def test_operators(self):
garbage = random.random(4)
colSeries = Series(garbage, index=np.array(self.frame.columns))
idSum = self.frame + self.frame
seriesSum = self.frame + colSeries
for col, series in compat.iteritems(idSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] * 2
if not np.isnan(val):
assert val == origVal
else:
assert np.isnan(origVal)
for col, series in compat.iteritems(seriesSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] + colSeries[col]
if not np.isnan(val):
assert val == origVal
else:
assert np.isnan(origVal)
added = self.frame2 + self.frame2
expected = self.frame2 * 2
assert_frame_equal(added, expected)
df = DataFrame({'a': ['a', None, 'b']})
assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
# Test for issue #10181
for dtype in ('float', 'int64'):
frames = [
DataFrame(dtype=dtype),
DataFrame(columns=['A'], dtype=dtype),
DataFrame(index=[0], dtype=dtype),
]
for df in frames:
assert (df + df).equals(df)
assert_frame_equal(df + df, df)
def test_ops_np_scalar(self):
vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]
f = lambda x: DataFrame(x, index=list('ABCDE'),
columns=['jim', 'joe', 'jolie'])
df = f(vals)
for x in xs:
assert_frame_equal(df / np.array(x), f(vals / x))
assert_frame_equal(np.array(x) * df, f(vals * x))
assert_frame_equal(df + np.array(x), f(vals + x))
assert_frame_equal(np.array(x) - df, f(x - vals))
def test_operators_boolean(self):
# GH 5808
# empty frames, non-mixed dtype
result = DataFrame(index=[1]) & DataFrame(index=[1])
assert_frame_equal(result, DataFrame(index=[1]))
result = DataFrame(index=[1]) | DataFrame(index=[1])
assert_frame_equal(result, DataFrame(index=[1]))
result = DataFrame(index=[1]) & DataFrame(index=[1, 2])
assert_frame_equal(result, DataFrame(index=[1, 2]))
result = DataFrame(index=[1], columns=['A']) & DataFrame(
index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(index=[1], columns=['A']))
result = DataFrame(True, index=[1], columns=['A']) & DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
result = DataFrame(True, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
# boolean ops
result = DataFrame(1, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(1, index=[1], columns=['A']))
def f():
DataFrame(1.0, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
pytest.raises(TypeError, f)
def f():
DataFrame('foo', index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
pytest.raises(TypeError, f)
def test_operators_none_as_na(self):
df = DataFrame({"col1": [2, 5.0, 123, None],
"col2": [1, 2, 3, 4]}, dtype=object)
ops = [operator.add, operator.sub, operator.mul, operator.truediv]
        # since filling converts dtypes from object, the expected result is
        # cast back to object
for op in ops:
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isnull(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isnull(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
assert_frame_equal(result, expected, check_dtype=False)
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
pytest.raises(TypeError, lambda: x == y)
pytest.raises(TypeError, lambda: x != y)
pytest.raises(TypeError, lambda: x >= y)
pytest.raises(TypeError, lambda: x > y)
pytest.raises(TypeError, lambda: x < y)
pytest.raises(TypeError, lambda: x <= y)
# GH4968
# invalid date/int comparisons
df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
df['dates'] = date_range('20010101', periods=len(df))
df2 = df.copy()
df2['dates'] = df['a']
check(df, df2)
df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
df2 = DataFrame({'a': date_range('20010101', periods=len(
df)), 'b': date_range('20100101', periods=len(df))})
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
df = DataFrame({'dates1': date_range('20010101', periods=10),
'dates2': date_range('20010102', periods=10),
'intcol': np.random.randint(1000000000, size=10),
'floatcol': np.random.randn(10),
'stringcol': list(tm.rands(10))})
df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(df, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), df)
assert_frame_equal(result, expected)
# nats
expected = left_f(df, Timestamp('nat'))
result = right_f(Timestamp('nat'), df)
assert_frame_equal(result, expected)
def test_modulo(self):
# GH3590, modulo as ints
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        # this is technically wrong, as the integer portion is coerced to float
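        # (pandas yields NaN wherever both operands are zero, whereas NumPy's
        # integer modulo-by-zero comes out as 0, so the NumPy-based expectation
        # below is patched to NaN before comparing)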
expected = DataFrame({'first': Series([0, 0, 0, 0], dtype='float64'),
'second': Series([np.nan, np.nan, np.nan, 0])})
result = p % p
assert_frame_equal(result, expected)
        # numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = p.values % p.values
result2 = DataFrame(arr, index=p.index,
columns=p.columns, dtype='float64')
result2.iloc[0:3, 1] = np.nan
assert_frame_equal(result2, expected)
result = p % 0
expected = DataFrame(np.nan, index=p.index, columns=p.columns)
assert_frame_equal(result, expected)
        # numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = p.values.astype('float64') % 0
result2 = DataFrame(arr, index=p.index, columns=p.columns)
assert_frame_equal(result2, expected)
# not commutative with series
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s % p
res2 = p % s
assert not np.array_equal(res.fillna(0), res2.fillna(0))
def test_div(self):
# integer div, but deal with the 0's (GH 9144)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p / p
expected = DataFrame({'first': Series([1.0, 1.0, 1.0, 1.0]),
'second': Series([nan, nan, nan, 1])})
assert_frame_equal(result, expected)
with np.errstate(all='ignore'):
arr = p.values.astype('float') / p.values
result2 = DataFrame(arr, index=p.index,
columns=p.columns)
assert_frame_equal(result2, expected)
result = p / 0
expected = DataFrame(np.inf, index=p.index, columns=p.columns)
expected.iloc[0:3, 1] = nan
assert_frame_equal(result, expected)
        # numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = p.values.astype('float64') / 0
result2 = DataFrame(arr, index=p.index,
columns=p.columns)
assert_frame_equal(result2, expected)
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s / p
res2 = p / s
assert not np.array_equal(res.fillna(0), res2.fillna(0))
def test_logical_operators(self):
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(op(df1.values, df2.values), index=df1.index,
columns=df1.columns)
assert result.values.dtype == np.bool_
assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index,
columns=df1.columns)
assert result.values.dtype == np.bool_
assert_frame_equal(result, expected)
df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': False, 'b': False, 'c': True,
'd': False, 'e': False},
'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'd': {'a': False, 'b': False, 'c': False,
'd': True, 'e': False},
'e': {'a': False, 'b': False, 'c': False,
'd': False, 'e': True}}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
# operator.neg is deprecated in numpy >= 1.9
_check_unary_op(operator.inv)
def test_logical_typeerror(self):
if not compat.PY3:
pytest.raises(TypeError, self.frame.__eq__, 'foo')
pytest.raises(TypeError, self.frame.__lt__, 'foo')
pytest.raises(TypeError, self.frame.__gt__, 'foo')
pytest.raises(TypeError, self.frame.__ne__, 'foo')
else:
pytest.skip('test_logical_typeerror not tested on PY3')
def test_logical_with_nas(self):
d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
# GH4947
# bool comparisons should return bool
result = d['a'] | d['b']
expected = Series([False, True])
assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d['a'].fillna(False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
result = d['a'].fillna(False, downcast=False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
def test_neg(self):
# what to do?
assert_frame_equal(-self.frame, -1 * self.frame)
def test_invert(self):
assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))
def test_arith_flex_frame(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
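        # operator.div does not exist on Python 3, so the flex 'div' method is
        # checked against operator.truediv there.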
for op in ops:
try:
alias = aliases.get(op, op)
f = getattr(operator, alias)
result = getattr(self.frame, op)(2 * self.frame)
exp = f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype=dict(C=None))
# vs mix int
if op in ['add', 'sub', 'mul']:
result = getattr(self.mixed_int, op)(2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# no overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B='uint64', C=None)
elif op in ['add', 'mul']:
dtype = dict(C=None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype=dtype)
# rops
r_f = lambda x, y: f(y, x)
result = getattr(self.frame, 'r' + op)(2 * self.frame)
exp = r_f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(
2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype=dict(C=None))
result = getattr(self.intframe, op)(2 * self.intframe)
exp = f(self.intframe, 2 * self.intframe)
assert_frame_equal(result, exp)
# vs mix int
if op in ['add', 'sub', 'mul']:
result = getattr(self.mixed_int, op)(
2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# no overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B='uint64', C=None)
elif op in ['add', 'mul']:
dtype = dict(C=None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype=dtype)
except:
printing.pprint_thing("Failing operation %r" % op)
raise
# ndim >= 3
ndim_5 = np.ones(self.frame.shape + (3, 4, 5))
msg = "Unable to coerce to Series/DataFrame"
with tm.assert_raises_regex(ValueError, msg):
f(self.frame, ndim_5)
with tm.assert_raises_regex(ValueError, msg):
getattr(self.frame, op)(ndim_5)
# res_add = self.frame.add(self.frame)
# res_sub = self.frame.sub(self.frame)
# res_mul = self.frame.mul(self.frame)
# res_div = self.frame.div(2 * self.frame)
# assert_frame_equal(res_add, self.frame + self.frame)
# assert_frame_equal(res_sub, self.frame - self.frame)
# assert_frame_equal(res_mul, self.frame * self.frame)
# assert_frame_equal(res_div, self.frame / (2 * self.frame))
const_add = self.frame.add(1)
assert_frame_equal(const_add, self.frame + 1)
# corner cases
result = self.frame.add(self.frame[:0])
assert_frame_equal(result, self.frame * np.nan)
result = self.frame[:0].add(self.frame)
assert_frame_equal(result, self.frame * np.nan)
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.iloc[0], fill_value=3)
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.iloc[0], axis='index', fill_value=3)
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product([list('abc'),
['one', 'two', 'three'],
[1, 2, 3]],
names=['first', 'second', 'third'])
df = DataFrame(np.arange(27 * 3).reshape(27, 3),
index=index,
columns=['value1', 'value2', 'value3']).sort_index()
idx = pd.IndexSlice
for op in ['add', 'sub', 'mul', 'div', 'truediv']:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level='third', axis=0)
expected = pd.concat([opa(df.loc[idx[:, :, i], :], v)
for i, v in x.iteritems()]).sort_index()
assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ['two', 'three'])
result = getattr(df, op)(x, level='second', axis=0)
expected = (pd.concat([opa(df.loc[idx[:, i], :], v)
for i, v in x.iteritems()])
.reindex_like(df).sort_index())
assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([['A', 'B'], ['a', 'b']])
df = DataFrame(np.ones((2, 4), dtype='int64'), columns=midx)
s = pd.Series({'a': 1, 'b': 2})
df2 = df.copy()
df2.columns.names = ['lvl0', 'lvl1']
s2 = s.copy()
s2.index.name = 'lvl1'
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level='lvl1')
res6 = df2.mul(s2, axis=1, level='lvl1')
exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),
columns=midx)
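        # df is all ones, so multiplying by s should broadcast 1 over the 'a'
        # columns and 2 over the 'b' columns, whichever way the level is named.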
for res in [res1, res2]:
assert_frame_equal(res, exp)
exp.columns.names = ['lvl0', 'lvl1']
for res in [res3, res4, res5, res6]:
assert_frame_equal(res, exp)
def test_arith_mixed(self):
left = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
self._test_op(df, operator.add)
self._test_op(df, operator.sub)
self._test_op(df, operator.mul)
self._test_op(df, operator.truediv)
self._test_op(df, operator.floordiv)
self._test_op(df, operator.pow)
self._test_op(df, lambda x, y: y + x)
self._test_op(df, lambda x, y: y - x)
self._test_op(df, lambda x, y: y * x)
self._test_op(df, lambda x, y: y / x)
self._test_op(df, lambda x, y: y ** x)
self._test_op(df, lambda x, y: x + y)
self._test_op(df, lambda x, y: x - y)
self._test_op(df, lambda x, y: x * y)
self._test_op(df, lambda x, y: x / y)
self._test_op(df, lambda x, y: x ** y)
@staticmethod
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
assert_series_equal(result[col], op(df[col], 1))
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = DataFrame(data)
other = DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
assert_frame_equal(f(other.values), o(df, other.values))
# scalar
assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
assert_frame_equal(f(np.nan), o(df, np.nan))
with tm.assert_raises_regex(ValueError, msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
assert_frame_equal(col_eq, df == Series(col_ser))
assert_frame_equal(col_eq, -col_ne)
assert_frame_equal(idx_eq, -idx_ne)
assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
assert_frame_equal(col_eq, df.eq(list(col_ser)))
assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
assert_frame_equal(col_gt, df > Series(col_ser))
assert_frame_equal(col_gt, -col_le)
assert_frame_equal(idx_gt, -idx_le)
assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
assert_frame_equal(col_ge, df >= Series(col_ser))
assert_frame_equal(col_ge, -col_lt)
assert_frame_equal(idx_ge, -idx_lt)
assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = Series(np.random.randn(5))
col_ser = Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = DataFrame({'a': arr})
df2 = DataFrame({'a': arr2})
rs = df.gt(df2)
assert not rs.values.any()
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = DataFrame({'a': arr3})
rs = df3.gt(2j)
assert not rs.values.any()
# corner, dtype=object
df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
result = df1.ne(df2)
exp = DataFrame({'col': [False, True, False]})
assert_frame_equal(result, exp)
    def test_return_dtypes_bool_op_constant(self):
# GH15077
df = DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
# not empty DataFrame
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
result = getattr(df, op)(const).get_dtype_counts()
tm.assert_series_equal(result, Series([2], ['bool']))
# empty DataFrame
empty = df.iloc[:0]
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
result = getattr(empty, op)(const).get_dtype_counts()
tm.assert_series_equal(result, Series([2], ['bool']))
def test_dti_tz_convert_to_utc(self):
base = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz='UTC')
idx1 = base.tz_convert('Asia/Tokyo')[:2]
idx2 = base.tz_convert('US/Eastern')[1:]
df1 = DataFrame({'A': [1, 2]}, index=idx1)
df2 = DataFrame({'A': [1, 1]}, index=idx2)
exp = DataFrame({'A': [np.nan, 3, np.nan]}, index=base)
assert_frame_equal(df1 + df2, exp)
def test_arith_flex_series(self):
df = self.simple
row = df.xs('a')
col = df['two']
# after arithmetic refactor, add truediv here
ops = ['add', 'sub', 'mul', 'mod']
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
assert_frame_equal(f(row), op(df, row))
assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
assert_frame_equal(df.div(row), df / row)
assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH7325
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='int64')
expected = DataFrame([[nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
assert_frame_equal(result, expected)
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='float64')
expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = self.simple
val1 = df.xs('a').values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
assert_frame_equal(df + val1, added)
added = DataFrame((df.values.T + val1).T,
index=df.index, columns=df.columns)
assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df['two'])
added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
assert_frame_equal(df + val2, added)
added = DataFrame((df.values.T + val2).T, index=df.index,
columns=df.columns)
assert_frame_equal(df.add(val2, axis='index'), added)
val3 = np.random.rand(*df.shape)
added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
assert_frame_equal(df.add(val3), added)
def test_combineFrame(self):
frame_copy = self.frame.reindex(self.frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = nan
added = self.frame + frame_copy
indexer = added['A'].valid().index
exp = (self.frame['A'] * 2).copy()
tm.assert_series_equal(added['A'].valid(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added['A'], exp.loc[added['A'].index])
assert np.isnan(added['C'].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added['D']).all()
self_added = self.frame + self.frame
tm.assert_index_equal(self_added.index, self.frame.index)
added_rev = frame_copy + self.frame
assert np.isnan(added['D']).all()
assert np.isnan(added_rev['D']).all()
# corner cases
# empty
plus_empty = self.frame + self.empty
assert np.isnan(plus_empty.values).all()
empty_plus = self.empty + self.frame
assert np.isnan(empty_plus.values).all()
empty_empty = self.empty + self.empty
assert empty_empty.empty
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
assert_frame_equal(reverse + self.frame, self.frame * 2)
# mix vs float64, upcast
added = self.frame + self.mixed_float
_check_mixed_float(added, dtype='float64')
added = self.mixed_float + self.frame
_check_mixed_float(added, dtype='float64')
# mix vs mix
added = self.mixed_float + self.mixed_float2
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float2 + self.mixed_float
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = self.frame + self.mixed_int
_check_mixed_float(added, dtype='float64')
def test_combineSeries(self):
# Series
series = self.frame.xs(self.frame.index[0])
added = self.frame + series
for key, s in compat.iteritems(added):
assert_series_equal(s, self.frame[key] + series[key])
larger_series = series.to_dict()
larger_series['E'] = 1
larger_series = Series(larger_series)
larger_added = self.frame + larger_series
for key, s in compat.iteritems(self.frame):
assert_series_equal(larger_added[key], s + series[key])
assert 'E' in larger_added
assert np.isnan(larger_added['E']).all()
# vs mix (upcast) as needed
added = self.mixed_float + series
_check_mixed_float(added, dtype='float64')
added = self.mixed_float + series.astype('float32')
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float + series.astype('float16')
_check_mixed_float(added, dtype=dict(C=None))
        # these raise with numexpr, as we end up adding an int64 to a
        # uint64 (weird); vs int:
# added = self.mixed_int + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = self.mixed_int + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = self.tsframe['A']
# 10890
# we no longer allow auto timeseries broadcasting
        # and require explicit broadcasting
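        # (e.g. self.tsframe + ts would align the Series' dates against the
        # frame's columns; broadcasting down the rows must now be requested
        # explicitly via .add(ts, axis='index'), as below)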
added = self.tsframe.add(ts, axis='index')
for key, col in compat.iteritems(self.tsframe):
result = col + ts
assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == 'A'
else:
assert result.name is None
smaller_frame = self.tsframe[:-5]
smaller_added = smaller_frame.add(ts, axis='index')
tm.assert_index_equal(smaller_added.index, self.tsframe.index)
smaller_ts = ts[:-5]
smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = self.tsframe.add(ts[:0], axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# Frame is all-nan
result = self.tsframe[:0].add(ts, axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# empty but with non-empty index
frame = self.tsframe[:1].reindex(columns=[])
result = frame.mul(ts, axis='index')
assert len(result) == len(ts)
def test_combineFunc(self):
result = self.frame * 2
tm.assert_numpy_array_equal(result.values, self.frame.values * 2)
# vs mix
result = self.mixed_float * 2
for c, s in compat.iteritems(result):
tm.assert_numpy_array_equal(
s.values, self.mixed_float[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = self.empty * 2
assert result.index is self.empty.index
assert len(result.columns) == 0
def test_comparisons(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = self.simple.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values,
func(df1.values, df2.values))
with tm.assert_raises_regex(ValueError,
'Wrong number of dimensions'):
func(df1, ndim_5)
result2 = func(self.simple, row)
tm.assert_numpy_array_equal(result2.values,
func(self.simple.values, row.values))
result3 = func(self.frame, 0)
tm.assert_numpy_array_equal(result3.values,
func(self.frame.values, 0))
with tm.assert_raises_regex(ValueError,
'Can only compare identically'
'-labeled DataFrame'):
func(self.simple, self.simple[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]['A'] = np.nan
with np.errstate(invalid='ignore'):
expected = missing_df.values < 0
with np.errstate(invalid='raise'):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_string_comparison(self):
df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
assert_frame_equal(df[mask_a], df.loc[1:1, :])
assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
assert_frame_equal(df[mask_b], df.loc[0:0, :])
assert_frame_equal(df[-mask_b], df.loc[1:1, :])
def test_float_none_comparison(self):
df = DataFrame(np.random.randn(8, 3), index=lrange(8),
columns=['A', 'B', 'C'])
pytest.raises(TypeError, df.__eq__, None)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
l = (2, 2, 2)
tup = tuple(l)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
assert_frame_equal(result, expected)
result = df.values > b
assert_numpy_array_equal(result, expected.values)
result = df > l
assert_frame_equal(result, expected)
result = df > tup
assert_frame_equal(result, expected)
result = df > b_r
assert_frame_equal(result, expected)
result = df.values > b_r
assert_numpy_array_equal(result, expected.values)
pytest.raises(ValueError, df.__gt__, b_c)
pytest.raises(ValueError, df.values.__gt__, b_c)
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
assert_frame_equal(result, expected)
result = df == l
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
result = df == b_r
assert_frame_equal(result, expected)
result = df.values == b_r
assert_numpy_array_equal(result, expected.values)
pytest.raises(ValueError, lambda: df == b_c)
assert not np.array_equal(df.values, b_c)
# with alignment
df = DataFrame(np.arange(6).reshape((3, 2)),
columns=list('AB'), index=list('abc'))
expected.index = df.index
expected.columns = df.columns
result = df == l
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
# not shape compatible
pytest.raises(ValueError, lambda: df == (2, 2))
pytest.raises(ValueError, lambda: df == [2, 2])
def test_combine_generic(self):
df1 = self.frame
df2 = self.frame.loc[self.frame.index[:-5], ['A', 'B', 'C']]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
assert combined['D'].isnull().all()
assert combined2['D'].isnull().all()
chunk = combined.loc[combined.index[:-5], ['A', 'B', 'C']]
chunk2 = combined2.loc[combined2.index[:-5], ['A', 'B', 'C']]
exp = self.frame.loc[self.frame.index[:-5],
['A', 'B', 'C']].reindex_like(chunk) * 2
assert_frame_equal(chunk, exp)
assert_frame_equal(chunk2, exp)
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list('abcdefg')
X_orig = DataFrame(np.arange(10 * len(columns))
.reshape(-1, len(columns)),
columns=columns, index=range(10))
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list('bedcf')
subs = list('bcdef')
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._data is s2._data
df = df_orig.copy()
df2 = df
df += 1
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._data is df2._data
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._data is df2._data
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({'A': arr.copy(), 'B': 'foo'})
df = df_orig.copy()
df2 = df
df['A'] += 1
expected = DataFrame({'A': arr.copy() + 1, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
assert df._data is df2._data
df = df_orig.copy()
df2 = df
df['A'] += 1.5
expected = DataFrame({'A': arr.copy() + 1.5, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
assert df._data is df2._data
def test_alignment_non_pandas(self):
index = ['A', 'B', 'C']
columns = ['X', 'Y', 'Z']
df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops._align_method_FRAME
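        # _align_method_FRAME is the private helper used by the arithmetic ops
        # to coerce list/tuple/ndarray operands into a Series or DataFrame
        # aligned with df along the requested axis.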
for val in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype=np.int64)]:
tm.assert_series_equal(align(df, val, 'index'),
Series([1, 2, 3], index=df.index))
tm.assert_series_equal(align(df, val, 'columns'),
Series([1, 2, 3], index=df.columns))
# length mismatch
msg = 'Unable to coerce to Series, length must be 3: given 2'
for val in [[1, 2], (1, 2), np.array([1, 2])]:
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'index')
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'columns')
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(align(df, val, 'index'),
DataFrame(val, index=df.index,
columns=df.columns))
tm.assert_frame_equal(align(df, val, 'columns'),
DataFrame(val, index=df.index,
columns=df.columns))
# shape mismatch
msg = 'Unable to coerce to DataFrame, shape must be'
val = np.array([[1, 2, 3], [4, 5, 6]])
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'index')
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'columns')
val = np.zeros((3, 3, 3))
with pytest.raises(ValueError):
align(df, val, 'index')
with pytest.raises(ValueError):
align(df, val, 'columns')
| mit |
rajul/mne-python | examples/preprocessing/plot_find_ecg_artifacts.py | 19 | 1304 | """
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
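# find_ecg_events also returns the index of the ECG channel used and the
# estimated average pulse; only the event array is needed here.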
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| bsd-3-clause |
auDeep/auDeep | audeep/backend/parsers/compare19_bs.py | 1 | 4739 | # Copyright (C) 2017-2019 Michael Freitag, Shahin Amiriparian, Sergey Pugachevskiy, Nicholas Cummins, Björn Schuller
#
# This file is part of auDeep.
#
# auDeep is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# auDeep is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with auDeep. If not, see <http://www.gnu.org/licenses/>.
"""Parser for the ComParE 2019 Baby Sounds (BS) dataset"""
import abc
from pathlib import Path
from typing import Optional, Mapping, Sequence
import pandas as pd
from audeep.backend.data.data_set import Partition
from audeep.backend.log import LoggingMixin
from audeep.backend.parsers.base import Parser, _InstanceMetadata
_COMPARE19_BS_LABEL_MAP = {
"Canonical": 0,
"Crying": 1,
"Junk": 2,
"Laughing": 3,
"Non-canonical": 4,
}
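# The nominal class names above map to the numeric labels exposed through
# Compare19BSParser.label_map; parse() leaves label_numeric as None so that it
# can be inferred from this map downstream.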
class Compare19BSParser(LoggingMixin, Parser):
def __init__(self, basedir: Path):
super().__init__(basedir)
self._metadata_cache = None
self._audio_dir = basedir / "wav"
@abc.abstractmethod
def label_key(self) -> str:
pass
def _metadata(self) -> pd.DataFrame:
if not self.can_parse():
raise IOError("unable to parse the ComParE 2019 Baby Sounds dataset at {}".format(self._basedir))
if self._metadata_cache is None:
metadata_file = self._basedir / "lab" / "labels.csv"
metadata_file_confidential = self._basedir / "lab" / "labels_confidential.csv"
            if metadata_file_confidential.exists():
self.log.warn("using confidential metadata file")
self._metadata_cache = pd.read_csv(metadata_file_confidential, sep=",")
else:
self._metadata_cache = pd.read_csv(metadata_file, sep=",")
return self._metadata_cache
def can_parse(self) -> bool:
metadata_file = self._basedir / "lab" / "labels.csv"
metadata_file_confidential = self._basedir / "lab" / "labels_confidential.csv"
if not self._audio_dir.exists():
self.log.debug("cannot parse: audio directory at %s missing", self._audio_dir)
return False
if not metadata_file_confidential.exists() and not metadata_file.exists():
self.log.debug("cannot parse: metadata file at %s missing", metadata_file)
return False
return True
@property
def label_map(self) -> Optional[Mapping[str, int]]:
if not self.can_parse():
raise IOError("unable to parse the ComParE 2019 Baby Sounds dataset at {}".format(self._basedir))
return _COMPARE19_BS_LABEL_MAP
@property
def num_instances(self) -> int:
if not self.can_parse():
raise IOError("unable to parse the ComParE 2019 Baby Sounds dataset at {}".format(self._basedir))
        # test instances are not contained in the label csv file
return len(list(self._audio_dir.glob("*.*")))
@property
def num_folds(self) -> int:
if not self.can_parse():
raise IOError("unable to parse the ComParE 2019 Baby Sounds dataset at {}".format(self._basedir))
return 0
def parse(self) -> Sequence[_InstanceMetadata]:
if not self.can_parse():
raise IOError("unable to parse the ComParE 2019 Baby Sounds dataset at {}".format(self._basedir))
meta_list = []
metadata = self._metadata()
for file in sorted(self._audio_dir.glob("*.*")):
label_nominal = metadata.loc[metadata["file_name"] == file.name]["label"]
# test labels are '?'
if all(l != '?' for l in label_nominal):
label_nominal = label_nominal.iloc[0]
else:
label_nominal = None
instance_metadata = _InstanceMetadata(
path=file,
filename=file.name,
label_nominal=label_nominal,
label_numeric=None, # inferred from label map
cv_folds=[],
partition=Partition.TRAIN if file.name.startswith("train") else Partition.DEVEL if file.name.startswith(
"devel") else Partition.TEST
)
self.log.debug("parsed instance %s: label = %s", file.name, label_nominal)
meta_list.append(instance_metadata)
return meta_list
| gpl-3.0 |
florentchandelier/zipline | tests/pipeline/test_factor.py | 2 | 45019 | """
Tests for Factor terms.
"""
from functools import partial
from itertools import product
from nose_parameterized import parameterized
from unittest import TestCase
from toolz import compose
from numpy import (
apply_along_axis,
arange,
array,
datetime64,
empty,
eye,
log1p,
nan,
ones,
rot90,
where,
)
from numpy.random import randn, seed
import pandas as pd
from scipy.stats.mstats import winsorize as scipy_winsorize
from zipline.errors import BadPercentileBounds, UnknownRankMethod
from zipline.lib.labelarray import LabelArray
from zipline.lib.rank import masked_rankdata_2d
from zipline.lib.normalize import naive_grouped_rowwise_apply as grouped_apply
from zipline.pipeline import Classifier, Factor, Filter
from zipline.pipeline.factors import (
Returns,
RSI,
)
from zipline.testing import (
check_allclose,
check_arrays,
parameter_space,
permute_rows,
)
from zipline.testing.fixtures import ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.numpy_utils import (
categorical_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
NaTns,
)
from zipline.utils.math_utils import nanmean, nanstd
from .base import BasePipelineTestCase
class F(Factor):
dtype = float64_dtype
inputs = ()
window_length = 0
class OtherF(Factor):
dtype = float64_dtype
inputs = ()
window_length = 0
class C(Classifier):
dtype = int64_dtype
missing_value = -1
inputs = ()
window_length = 0
class OtherC(Classifier):
dtype = int64_dtype
missing_value = -1
inputs = ()
window_length = 0
class Mask(Filter):
inputs = ()
window_length = 0
for_each_factor_dtype = parameterized.expand([
('datetime64[ns]', datetime64ns_dtype),
('float', float64_dtype),
])
class FactorTestCase(BasePipelineTestCase):
def init_instance_fixtures(self):
super(FactorTestCase, self).init_instance_fixtures()
self.f = F()
def test_bad_input(self):
with self.assertRaises(UnknownRankMethod):
self.f.rank("not a real rank method")
@parameter_space(method_name=['isnan', 'notnan', 'isfinite'])
def test_float64_only_ops(self, method_name):
class NotFloat(Factor):
dtype = datetime64ns_dtype
inputs = ()
window_length = 0
nf = NotFloat()
meth = getattr(nf, method_name)
with self.assertRaises(TypeError):
meth()
@parameter_space(custom_missing_value=[-1, 0])
def test_isnull_int_dtype(self, custom_missing_value):
class CustomMissingValue(Factor):
dtype = int64_dtype
window_length = 0
missing_value = custom_missing_value
inputs = ()
factor = CustomMissingValue()
data = arange(25).reshape(5, 5)
data[eye(5, dtype=bool)] = custom_missing_value
self.check_terms(
{
'isnull': factor.isnull(),
'notnull': factor.notnull(),
},
{
'isnull': eye(5, dtype=bool),
'notnull': ~eye(5, dtype=bool),
},
initial_workspace={factor: data},
mask=self.build_mask(ones((5, 5))),
)
def test_isnull_datetime_dtype(self):
class DatetimeFactor(Factor):
dtype = datetime64ns_dtype
window_length = 0
inputs = ()
factor = DatetimeFactor()
data = arange(25).reshape(5, 5).astype('datetime64[ns]')
data[eye(5, dtype=bool)] = NaTns
self.check_terms(
{
'isnull': factor.isnull(),
'notnull': factor.notnull(),
},
{
'isnull': eye(5, dtype=bool),
'notnull': ~eye(5, dtype=bool),
},
initial_workspace={factor: data},
mask=self.build_mask(ones((5, 5))),
)
@for_each_factor_dtype
def test_rank_ascending(self, name, factor_dtype):
f = F(dtype=factor_dtype)
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=factor_dtype)
expected_ranks = {
'ordinal': array([[1., 3., 4., 5., 2.],
[2., 4., 5., 1., 3.],
[3., 5., 1., 2., 4.],
[4., 1., 2., 3., 5.],
[1., 3., 4., 5., 2.]]),
'average': array([[1.5, 3., 4., 5., 1.5],
[2.5, 4., 5., 1., 2.5],
[3.5, 5., 1., 2., 3.5],
[4.5, 1., 2., 3., 4.5],
[1.5, 3., 4., 5., 1.5]]),
'min': array([[1., 3., 4., 5., 1.],
[2., 4., 5., 1., 2.],
[3., 5., 1., 2., 3.],
[4., 1., 2., 3., 4.],
[1., 3., 4., 5., 1.]]),
'max': array([[2., 3., 4., 5., 2.],
[3., 4., 5., 1., 3.],
[4., 5., 1., 2., 4.],
[5., 1., 2., 3., 5.],
[2., 3., 4., 5., 2.]]),
'dense': array([[1., 2., 3., 4., 1.],
[2., 3., 4., 1., 2.],
[3., 4., 1., 2., 3.],
[4., 1., 2., 3., 4.],
[1., 2., 3., 4., 1.]]),
}
def check(terms):
self.check_terms(
terms,
expected={name: expected_ranks[name] for name in terms},
initial_workspace={f: data},
mask=self.build_mask(ones((5, 5))),
)
check({meth: f.rank(method=meth) for meth in expected_ranks})
check({
meth: f.rank(method=meth, ascending=True)
for meth in expected_ranks
})
# Not passing a method should default to ordinal.
check({'ordinal': f.rank()})
check({'ordinal': f.rank(ascending=True)})
@for_each_factor_dtype
def test_rank_descending(self, name, factor_dtype):
f = F(dtype=factor_dtype)
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=factor_dtype)
expected_ranks = {
'ordinal': array([[4., 3., 2., 1., 5.],
[3., 2., 1., 5., 4.],
[2., 1., 5., 4., 3.],
[1., 5., 4., 3., 2.],
[4., 3., 2., 1., 5.]]),
'average': array([[4.5, 3., 2., 1., 4.5],
[3.5, 2., 1., 5., 3.5],
[2.5, 1., 5., 4., 2.5],
[1.5, 5., 4., 3., 1.5],
[4.5, 3., 2., 1., 4.5]]),
'min': array([[4., 3., 2., 1., 4.],
[3., 2., 1., 5., 3.],
[2., 1., 5., 4., 2.],
[1., 5., 4., 3., 1.],
[4., 3., 2., 1., 4.]]),
'max': array([[5., 3., 2., 1., 5.],
[4., 2., 1., 5., 4.],
[3., 1., 5., 4., 3.],
[2., 5., 4., 3., 2.],
[5., 3., 2., 1., 5.]]),
'dense': array([[4., 3., 2., 1., 4.],
[3., 2., 1., 4., 3.],
[2., 1., 4., 3., 2.],
[1., 4., 3., 2., 1.],
[4., 3., 2., 1., 4.]]),
}
def check(terms):
self.check_terms(
terms,
expected={name: expected_ranks[name] for name in terms},
initial_workspace={f: data},
mask=self.build_mask(ones((5, 5))),
)
check({
meth: f.rank(method=meth, ascending=False)
for meth in expected_ranks
})
# Not passing a method should default to ordinal.
check({'ordinal': f.rank(ascending=False)})
@for_each_factor_dtype
def test_rank_after_mask(self, name, factor_dtype):
f = F(dtype=factor_dtype)
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=factor_dtype)
mask_data = ~eye(5, dtype=bool)
initial_workspace = {f: data, Mask(): mask_data}
terms = {
"ascending_nomask": f.rank(ascending=True),
"ascending_mask": f.rank(ascending=True, mask=Mask()),
"descending_nomask": f.rank(ascending=False),
"descending_mask": f.rank(ascending=False, mask=Mask()),
}
expected = {
"ascending_nomask": array([[1., 3., 4., 5., 2.],
[2., 4., 5., 1., 3.],
[3., 5., 1., 2., 4.],
[4., 1., 2., 3., 5.],
[1., 3., 4., 5., 2.]]),
"descending_nomask": array([[4., 3., 2., 1., 5.],
[3., 2., 1., 5., 4.],
[2., 1., 5., 4., 3.],
[1., 5., 4., 3., 2.],
[4., 3., 2., 1., 5.]]),
# Diagonal should be all nans, and anything whose rank was less
# than the diagonal in the unmasked calc should go down by 1.
"ascending_mask": array([[nan, 2., 3., 4., 1.],
[2., nan, 4., 1., 3.],
[2., 4., nan, 1., 3.],
[3., 1., 2., nan, 4.],
[1., 2., 3., 4., nan]]),
"descending_mask": array([[nan, 3., 2., 1., 4.],
[2., nan, 1., 4., 3.],
[2., 1., nan, 4., 3.],
[1., 4., 3., nan, 2.],
[4., 3., 2., 1., nan]]),
}
self.check_terms(
terms,
expected,
initial_workspace,
mask=self.build_mask(ones((5, 5))),
)
@for_each_factor_dtype
def test_grouped_rank_ascending(self, name, factor_dtype=float64_dtype):
f = F(dtype=factor_dtype)
c = C()
str_c = C(dtype=categorical_dtype, missing_value=None)
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=factor_dtype)
# Generated with:
# classifier_data = arange(25).reshape(5, 5).transpose() % 2
classifier_data = array([[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0]], dtype=int64_dtype)
string_classifier_data = LabelArray(
classifier_data.astype(str).astype(object),
missing_value=None,
)
expected_ranks = {
'ordinal': array(
[[1., 1., 3., 2., 2.],
[1., 2., 3., 1., 2.],
[2., 2., 1., 1., 3.],
[2., 1., 1., 2., 3.],
[1., 1., 3., 2., 2.]]
),
'average': array(
[[1.5, 1., 3., 2., 1.5],
[1.5, 2., 3., 1., 1.5],
[2.5, 2., 1., 1., 2.5],
[2.5, 1., 1., 2., 2.5],
[1.5, 1., 3., 2., 1.5]]
),
'min': array(
[[1., 1., 3., 2., 1.],
[1., 2., 3., 1., 1.],
[2., 2., 1., 1., 2.],
[2., 1., 1., 2., 2.],
[1., 1., 3., 2., 1.]]
),
'max': array(
[[2., 1., 3., 2., 2.],
[2., 2., 3., 1., 2.],
[3., 2., 1., 1., 3.],
[3., 1., 1., 2., 3.],
[2., 1., 3., 2., 2.]]
),
'dense': array(
[[1., 1., 2., 2., 1.],
[1., 2., 2., 1., 1.],
[2., 2., 1., 1., 2.],
[2., 1., 1., 2., 2.],
[1., 1., 2., 2., 1.]]
),
}
def check(terms):
self.check_terms(
terms,
expected={name: expected_ranks[name] for name in terms},
initial_workspace={
f: data,
c: classifier_data,
str_c: string_classifier_data,
},
mask=self.build_mask(ones((5, 5))),
)
# Not specifying the value of ascending param should default to True
check({
meth: f.rank(method=meth, groupby=c)
for meth in expected_ranks
})
check({
meth: f.rank(method=meth, groupby=str_c)
for meth in expected_ranks
})
check({
meth: f.rank(method=meth, groupby=c, ascending=True)
for meth in expected_ranks
})
check({
meth: f.rank(method=meth, groupby=str_c, ascending=True)
for meth in expected_ranks
})
# Not passing a method should default to ordinal
check({'ordinal': f.rank(groupby=c)})
check({'ordinal': f.rank(groupby=str_c)})
check({'ordinal': f.rank(groupby=c, ascending=True)})
check({'ordinal': f.rank(groupby=str_c, ascending=True)})
@for_each_factor_dtype
def test_grouped_rank_descending(self, name, factor_dtype):
f = F(dtype=factor_dtype)
c = C()
str_c = C(dtype=categorical_dtype, missing_value=None)
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=factor_dtype)
# Generated with:
# classifier_data = arange(25).reshape(5, 5).transpose() % 2
classifier_data = array([[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0]], dtype=int64_dtype)
string_classifier_data = LabelArray(
classifier_data.astype(str).astype(object),
missing_value=None,
)
expected_ranks = {
'ordinal': array(
[[2., 2., 1., 1., 3.],
[2., 1., 1., 2., 3.],
[1., 1., 3., 2., 2.],
[1., 2., 3., 1., 2.],
[2., 2., 1., 1., 3.]]
),
'average': array(
[[2.5, 2., 1., 1., 2.5],
[2.5, 1., 1., 2., 2.5],
[1.5, 1., 3., 2., 1.5],
[1.5, 2., 3., 1., 1.5],
[2.5, 2., 1., 1., 2.5]]
),
'min': array(
[[2., 2., 1., 1., 2.],
[2., 1., 1., 2., 2.],
[1., 1., 3., 2., 1.],
[1., 2., 3., 1., 1.],
[2., 2., 1., 1., 2.]]
),
'max': array(
[[3., 2., 1., 1., 3.],
[3., 1., 1., 2., 3.],
[2., 1., 3., 2., 2.],
[2., 2., 3., 1., 2.],
[3., 2., 1., 1., 3.]]
),
'dense': array(
[[2., 2., 1., 1., 2.],
[2., 1., 1., 2., 2.],
[1., 1., 2., 2., 1.],
[1., 2., 2., 1., 1.],
[2., 2., 1., 1., 2.]]
),
}
def check(terms):
self.check_terms(
terms,
expected={name: expected_ranks[name] for name in terms},
initial_workspace={
f: data,
c: classifier_data,
str_c: string_classifier_data,
},
mask=self.build_mask(ones((5, 5))),
)
check({
meth: f.rank(method=meth, groupby=c, ascending=False)
for meth in expected_ranks
})
check({
meth: f.rank(method=meth, groupby=str_c, ascending=False)
for meth in expected_ranks
})
# Not passing a method should default to ordinal
check({'ordinal': f.rank(groupby=c, ascending=False)})
check({'ordinal': f.rank(groupby=str_c, ascending=False)})
@parameterized.expand([
# Test cases computed by doing:
# from numpy.random import seed, randn
# from talib import RSI
# seed(seed_value)
# data = abs(randn(15, 3))
# expected = [RSI(data[:, i])[-1] for i in range(3)]
(100, array([41.032913785966, 51.553585468393, 51.022005016446])),
(101, array([43.506969935466, 46.145367530182, 50.57407044197])),
(102, array([46.610102205934, 47.646892444315, 52.13182788538])),
])
def test_rsi(self, seed_value, expected):
rsi = RSI()
today = datetime64(1, 'ns')
assets = arange(3)
out = empty((3,), dtype=float)
seed(seed_value) # Seed so we get deterministic results.
test_data = abs(randn(15, 3))
out = empty((3,), dtype=float)
rsi.compute(today, assets, out, test_data)
check_allclose(expected, out)
@parameterized.expand([
(100, 15),
(101, 4),
(102, 100),
])
def test_returns(self, seed_value, window_length):
returns = Returns(window_length=window_length)
today = datetime64(1, 'ns')
assets = arange(3)
out = empty((3,), dtype=float)
seed(seed_value) # Seed so we get deterministic results.
test_data = abs(randn(window_length, 3))
# Calculate the expected returns
expected = (test_data[-1] - test_data[0]) / test_data[0]
out = empty((3,), dtype=float)
returns.compute(today, assets, out, test_data)
check_allclose(expected, out)
def gen_ranking_cases():
seeds = range(int(1e4), int(1e5), int(1e4))
methods = ('ordinal', 'average')
use_mask_values = (True, False)
set_missing_values = (True, False)
ascending_values = (True, False)
return product(
seeds,
methods,
use_mask_values,
set_missing_values,
ascending_values,
)
@parameterized.expand(gen_ranking_cases())
def test_masked_rankdata_2d(self,
seed_value,
method,
use_mask,
set_missing,
ascending):
eyemask = ~eye(5, dtype=bool)
nomask = ones((5, 5), dtype=bool)
seed(seed_value)
asfloat = (randn(5, 5) * seed_value)
asdatetime = (asfloat).copy().view('datetime64[ns]')
mask = eyemask if use_mask else nomask
if set_missing:
asfloat[:, 2] = nan
asdatetime[:, 2] = NaTns
float_result = masked_rankdata_2d(
data=asfloat,
mask=mask,
missing_value=nan,
method=method,
ascending=True,
)
datetime_result = masked_rankdata_2d(
data=asdatetime,
mask=mask,
missing_value=NaTns,
method=method,
ascending=True,
)
check_arrays(float_result, datetime_result)
def test_normalizations_hand_computed(self):
"""
Test the hand-computed example in factor.demean.
"""
f = self.f
m = Mask()
c = C()
str_c = C(dtype=categorical_dtype, missing_value=None)
factor_data = array(
[[1.0, 2.0, 3.0, 4.0],
[1.5, 2.5, 3.5, 1.0],
[2.0, 3.0, 4.0, 1.5],
[2.5, 3.5, 1.0, 2.0]],
)
filter_data = array(
[[False, True, True, True],
[True, False, True, True],
[True, True, False, True],
[True, True, True, False]],
dtype=bool,
)
classifier_data = array(
[[1, 1, 2, 2],
[1, 1, 2, 2],
[1, 1, 2, 2],
[1, 1, 2, 2]],
dtype=int64_dtype,
)
string_classifier_data = LabelArray(
classifier_data.astype(str).astype(object),
missing_value=None,
)
terms = {
'vanilla': f.demean(),
'masked': f.demean(mask=m),
'grouped': f.demean(groupby=c),
'grouped_str': f.demean(groupby=str_c),
'grouped_masked': f.demean(mask=m, groupby=c),
'grouped_masked_str': f.demean(mask=m, groupby=str_c),
}
expected = {
'vanilla': array(
[[-1.500, -0.500, 0.500, 1.500],
[-0.625, 0.375, 1.375, -1.125],
[-0.625, 0.375, 1.375, -1.125],
[0.250, 1.250, -1.250, -0.250]],
),
'masked': array(
[[nan, -1.000, 0.000, 1.000],
[-0.500, nan, 1.500, -1.000],
[-0.166, 0.833, nan, -0.666],
[0.166, 1.166, -1.333, nan]],
),
'grouped': array(
[[-0.500, 0.500, -0.500, 0.500],
[-0.500, 0.500, 1.250, -1.250],
[-0.500, 0.500, 1.250, -1.250],
[-0.500, 0.500, -0.500, 0.500]],
),
'grouped_masked': array(
[[nan, 0.000, -0.500, 0.500],
[0.000, nan, 1.250, -1.250],
[-0.500, 0.500, nan, 0.000],
[-0.500, 0.500, 0.000, nan]]
)
}
# Changing the classifier dtype shouldn't affect anything.
expected['grouped_str'] = expected['grouped']
expected['grouped_masked_str'] = expected['grouped_masked']
self.check_terms(
terms,
expected,
initial_workspace={
f: factor_data,
c: classifier_data,
str_c: string_classifier_data,
m: filter_data,
},
mask=self.build_mask(self.ones_mask(shape=factor_data.shape)),
# The hand-computed values aren't very precise (in particular,
# we truncate repeating decimals at 3 places) This is just
# asserting that the example isn't misleading by being totally
# wrong.
check=partial(check_allclose, atol=0.001),
)
def test_winsorize_hand_computed(self):
"""
Test the hand-computed example in factor.winsorize.
"""
f = self.f
m = Mask()
c = C()
str_c = C(dtype=categorical_dtype, missing_value=None)
factor_data = array([
[1., 2., 3., 4., 5., 6.],
[1., 8., 27., 64., 125., 216.],
[6., 5., 4., 3., 2., 1.]
])
filter_data = array(
[[False, True, True, True, True, True],
[True, False, True, True, True, True],
[True, True, False, True, True, True]],
dtype=bool,
)
classifier_data = array(
[[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2]],
dtype=int64_dtype,
)
string_classifier_data = LabelArray(
classifier_data.astype(str).astype(object),
missing_value=None,
)
terms = {
'winsor_1': f.winsorize(
min_percentile=0.33,
max_percentile=0.67
),
'winsor_2': f.winsorize(
min_percentile=0.49,
max_percentile=1
),
'winsor_3': f.winsorize(
min_percentile=0,
max_percentile=.67
),
'masked': f.winsorize(
min_percentile=0.33,
max_percentile=0.67,
mask=m
),
'grouped': f.winsorize(
min_percentile=0.34,
max_percentile=0.66,
groupby=c
),
'grouped_str': f.winsorize(
min_percentile=0.34,
max_percentile=0.66,
groupby=str_c
),
'grouped_masked': f.winsorize(
min_percentile=0.34,
max_percentile=0.66,
mask=m,
groupby=c
),
'grouped_masked_str': f.winsorize(
min_percentile=0.34,
max_percentile=0.66,
mask=m,
groupby=str_c
),
}
expected = {
'winsor_1': array([
[2., 2., 3., 4., 5., 5.],
[8., 8., 27., 64., 125., 125.],
[5., 5., 4., 3., 2., 2.]
]),
'winsor_2': array([
[3.0, 3., 3., 4., 5., 6.],
[27., 27., 27., 64., 125., 216.],
[6.0, 5., 4., 3., 3., 3.]
]),
'winsor_3': array([
[1., 2., 3., 4., 5., 5.],
[1., 8., 27., 64., 125., 125.],
[5., 5., 4., 3., 2., 1.]
]),
'masked': array([
[nan, 3., 3., 4., 5., 5.],
[27., nan, 27., 64., 125., 125.],
[5.0, 5., nan, 3., 2., 2.]
]),
'grouped': array([
[2., 2., 2., 5., 5., 5.],
[8., 8., 8., 125., 125., 125.],
[5., 5., 5., 2., 2., 2.]
]),
'grouped_masked': array([
[nan, 2., 3., 5., 5., 5.],
[1.0, nan, 27., 125., 125., 125.],
[6.0, 5., nan, 2., 2., 2.]
]),
}
# Changing the classifier dtype shouldn't affect anything.
expected['grouped_str'] = expected['grouped']
expected['grouped_masked_str'] = expected['grouped_masked']
self.check_terms(
terms,
expected,
initial_workspace={
f: factor_data,
c: classifier_data,
str_c: string_classifier_data,
m: filter_data,
},
mask=self.build_mask(self.ones_mask(shape=factor_data.shape)),
check=partial(check_allclose, atol=0.001),
)
def test_winsorize_bad_bounds(self):
"""
Test out of bounds input for factor.winsorize.
"""
f = self.f
bad_percentiles = [
(-.1, 1),
(0, 95),
(5, 95),
(5, 5),
(.6, .4)
]
for min_, max_ in bad_percentiles:
with self.assertRaises(BadPercentileBounds):
f.winsorize(min_percentile=min_, max_percentile=max_)
@parameter_space(
seed_value=range(1, 2),
normalizer_name_and_func=[
('demean', {}, lambda row: row - nanmean(row)),
('zscore', {}, lambda row: (row - nanmean(row)) / nanstd(row)),
(
'winsorize',
{"min_percentile": 0.25, "max_percentile": 0.75},
lambda row: scipy_winsorize(
row,
limits=0.25,
)
),
],
add_nulls_to_factor=(False, True,),
)
def test_normalizations_randomized(self,
seed_value,
normalizer_name_and_func,
add_nulls_to_factor):
name, kwargs, func = normalizer_name_and_func
shape = (20, 20)
# All Trues.
nomask = self.ones_mask(shape=shape)
# Falses on main diagonal.
eyemask = self.eye_mask(shape=shape)
# Falses on other diagonal.
eyemask90 = rot90(eyemask)
# Falses on both diagonals.
xmask = eyemask & eyemask90
# Block of random data.
factor_data = self.randn_data(seed=seed_value, shape=shape)
if add_nulls_to_factor:
factor_data = where(eyemask, factor_data, nan)
# Cycles of 0, 1, 2, 0, 1, 2, ...
classifier_data = (
(self.arange_data(shape=shape, dtype=int64_dtype) + seed_value) % 3
)
# With -1s on main diagonal.
classifier_data_eyenulls = where(eyemask, classifier_data, -1)
# With -1s on opposite diagonal.
classifier_data_eyenulls90 = where(eyemask90, classifier_data, -1)
# With -1s on both diagonals.
classifier_data_xnulls = where(xmask, classifier_data, -1)
f = self.f
c = C()
c_with_nulls = OtherC()
m = Mask()
method = partial(getattr(f, name), **kwargs)
terms = {
'vanilla': method(),
'masked': method(mask=m),
'grouped': method(groupby=c),
'grouped_with_nulls': method(groupby=c_with_nulls),
'both': method(mask=m, groupby=c),
'both_with_nulls': method(mask=m, groupby=c_with_nulls),
}
expected = {
'vanilla': apply_along_axis(func, 1, factor_data,),
'masked': where(
eyemask,
grouped_apply(factor_data, eyemask, func),
nan,
),
'grouped': grouped_apply(
factor_data,
classifier_data,
func,
),
# If the classifier has nulls, we should get NaNs in the
# corresponding locations in the output.
'grouped_with_nulls': where(
eyemask90,
grouped_apply(factor_data, classifier_data_eyenulls90, func),
nan,
),
# Passing a mask with a classifier should behave as though the
# classifier had nulls where the mask was False.
'both': where(
eyemask,
grouped_apply(
factor_data,
classifier_data_eyenulls,
func,
),
nan,
),
'both_with_nulls': where(
xmask,
grouped_apply(
factor_data,
classifier_data_xnulls,
func,
),
nan,
)
}
self.check_terms(
terms=terms,
expected=expected,
initial_workspace={
f: factor_data,
c: classifier_data,
c_with_nulls: classifier_data_eyenulls90,
Mask(): eyemask,
},
mask=self.build_mask(nomask),
)
@parameter_space(method_name=['demean', 'zscore'])
def test_cant_normalize_non_float(self, method_name):
class DateFactor(Factor):
dtype = datetime64ns_dtype
inputs = ()
window_length = 0
d = DateFactor()
with self.assertRaises(TypeError) as e:
getattr(d, method_name)()
errmsg = str(e.exception)
expected = (
"{normalizer}() is only defined on Factors of dtype float64,"
" but it was called on a Factor of dtype datetime64[ns]."
).format(normalizer=method_name)
self.assertEqual(errmsg, expected)
@parameter_space(seed=[1, 2, 3])
def test_quantiles_unmasked(self, seed):
permute = partial(permute_rows, seed)
shape = (6, 6)
# Shuffle the input rows to verify that we don't depend on the order.
# Take the log to ensure that we don't depend on linear scaling or
# integrality of inputs
factor_data = permute(log1p(arange(36, dtype=float).reshape(shape)))
f = self.f
# Apply the same shuffle we applied to the input rows to our
# expectations. Doing it this way makes it obvious that our
# expectation corresponds to our input, while still testing against
# a range of input orderings.
permuted_array = compose(permute, partial(array, dtype=int64_dtype))
self.check_terms(
terms={
'2': f.quantiles(bins=2),
'3': f.quantiles(bins=3),
'6': f.quantiles(bins=6),
},
initial_workspace={
f: factor_data,
},
expected={
# The values in the input are all increasing, so the first half
# of each row should be in the bottom bucket, and the second
# half should be in the top bucket.
'2': permuted_array([[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]]),
# Similar for three buckets.
'3': permuted_array([[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 2, 2]]),
# In the limiting case, we just have every column different.
'6': permuted_array([[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5]]),
},
mask=self.build_mask(self.ones_mask(shape=shape)),
)
@parameter_space(seed=[1, 2, 3])
def test_quantiles_masked(self, seed):
permute = partial(permute_rows, seed)
# 7 x 7 so that we divide evenly into 2/3/6-tiles after including the
# nan value in each row.
shape = (7, 7)
# Shuffle the input rows to verify that we don't depend on the order.
# Take the log to ensure that we don't depend on linear scaling or
# integrality of inputs
factor_data = permute(log1p(arange(49, dtype=float).reshape(shape)))
factor_data_w_nans = where(
permute(rot90(self.eye_mask(shape=shape))),
factor_data,
nan,
)
mask_data = permute(self.eye_mask(shape=shape))
f = F()
f_nans = OtherF()
m = Mask()
# Apply the same shuffle we applied to the input rows to our
# expectations. Doing it this way makes it obvious that our
# expectation corresponds to our input, while still testing against
# a range of input orderings.
permuted_array = compose(permute, partial(array, dtype=int64_dtype))
self.check_terms(
terms={
'2_masked': f.quantiles(bins=2, mask=m),
'3_masked': f.quantiles(bins=3, mask=m),
'6_masked': f.quantiles(bins=6, mask=m),
'2_nans': f_nans.quantiles(bins=2),
'3_nans': f_nans.quantiles(bins=3),
'6_nans': f_nans.quantiles(bins=6),
},
initial_workspace={
f: factor_data,
f_nans: factor_data_w_nans,
m: mask_data,
},
expected={
# Expected results here are the same as in
# test_quantiles_unmasked, except with diagonals of -1s
# interpolated to match the effects of masking and/or input
# nans.
'2_masked': permuted_array([[-1, 0, 0, 0, 1, 1, 1],
[0, -1, 0, 0, 1, 1, 1],
[0, 0, -1, 0, 1, 1, 1],
[0, 0, 0, -1, 1, 1, 1],
[0, 0, 0, 1, -1, 1, 1],
[0, 0, 0, 1, 1, -1, 1],
[0, 0, 0, 1, 1, 1, -1]]),
'3_masked': permuted_array([[-1, 0, 0, 1, 1, 2, 2],
[0, -1, 0, 1, 1, 2, 2],
[0, 0, -1, 1, 1, 2, 2],
[0, 0, 1, -1, 1, 2, 2],
[0, 0, 1, 1, -1, 2, 2],
[0, 0, 1, 1, 2, -1, 2],
[0, 0, 1, 1, 2, 2, -1]]),
'6_masked': permuted_array([[-1, 0, 1, 2, 3, 4, 5],
[0, -1, 1, 2, 3, 4, 5],
[0, 1, -1, 2, 3, 4, 5],
[0, 1, 2, -1, 3, 4, 5],
[0, 1, 2, 3, -1, 4, 5],
[0, 1, 2, 3, 4, -1, 5],
[0, 1, 2, 3, 4, 5, -1]]),
'2_nans': permuted_array([[0, 0, 0, 1, 1, 1, -1],
[0, 0, 0, 1, 1, -1, 1],
[0, 0, 0, 1, -1, 1, 1],
[0, 0, 0, -1, 1, 1, 1],
[0, 0, -1, 0, 1, 1, 1],
[0, -1, 0, 0, 1, 1, 1],
[-1, 0, 0, 0, 1, 1, 1]]),
'3_nans': permuted_array([[0, 0, 1, 1, 2, 2, -1],
[0, 0, 1, 1, 2, -1, 2],
[0, 0, 1, 1, -1, 2, 2],
[0, 0, 1, -1, 1, 2, 2],
[0, 0, -1, 1, 1, 2, 2],
[0, -1, 0, 1, 1, 2, 2],
[-1, 0, 0, 1, 1, 2, 2]]),
'6_nans': permuted_array([[0, 1, 2, 3, 4, 5, -1],
[0, 1, 2, 3, 4, -1, 5],
[0, 1, 2, 3, -1, 4, 5],
[0, 1, 2, -1, 3, 4, 5],
[0, 1, -1, 2, 3, 4, 5],
[0, -1, 1, 2, 3, 4, 5],
[-1, 0, 1, 2, 3, 4, 5]]),
},
mask=self.build_mask(self.ones_mask(shape=shape)),
)
def test_quantiles_uneven_buckets(self):
permute = partial(permute_rows, 5)
shape = (5, 5)
factor_data = permute(log1p(arange(25, dtype=float).reshape(shape)))
mask_data = permute(self.eye_mask(shape=shape))
f = F()
m = Mask()
permuted_array = compose(permute, partial(array, dtype=int64_dtype))
self.check_terms(
terms={
'3_masked': f.quantiles(bins=3, mask=m),
'7_masked': f.quantiles(bins=7, mask=m),
},
initial_workspace={
f: factor_data,
m: mask_data,
},
expected={
'3_masked': permuted_array([[-1, 0, 0, 1, 2],
[0, -1, 0, 1, 2],
[0, 0, -1, 1, 2],
[0, 0, 1, -1, 2],
[0, 0, 1, 2, -1]]),
'7_masked': permuted_array([[-1, 0, 2, 4, 6],
[0, -1, 2, 4, 6],
[0, 2, -1, 4, 6],
[0, 2, 4, -1, 6],
[0, 2, 4, 6, -1]]),
},
mask=self.build_mask(self.ones_mask(shape=shape)),
)
def test_quantile_helpers(self):
f = self.f
m = Mask()
self.assertIs(f.quartiles(), f.quantiles(bins=4))
self.assertIs(f.quartiles(mask=m), f.quantiles(bins=4, mask=m))
self.assertIsNot(f.quartiles(), f.quartiles(mask=m))
self.assertIs(f.quintiles(), f.quantiles(bins=5))
self.assertIs(f.quintiles(mask=m), f.quantiles(bins=5, mask=m))
self.assertIsNot(f.quintiles(), f.quintiles(mask=m))
self.assertIs(f.deciles(), f.quantiles(bins=10))
self.assertIs(f.deciles(mask=m), f.quantiles(bins=10, mask=m))
self.assertIsNot(f.deciles(), f.deciles(mask=m))
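# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the expected
# arrays above encode row-wise, rank-based winsorization and quantile
# bucketing.  The helpers below restate that behaviour with plain
# numpy/pandas; the `_sketch_*` names are hypothetical and the exact zipline
# implementation may differ.
import numpy as _np
import pandas as _pd
def _sketch_row_winsorize(row, min_percentile, max_percentile):
    """Clip a 1d array at rank-based lower/upper percentile cutoffs."""
    srt = _np.sort(row)
    n = len(row)
    lo = srt[int(min_percentile * n)]
    hi = srt[max(int(_np.ceil(max_percentile * n)) - 1, 0)]
    return _np.clip(row, lo, hi)
def _sketch_examples():
    row = _np.array([1., 2., 3., 4., 5., 6.])
    # Reproduces the first row of expected['winsor_1']: [2, 2, 3, 4, 5, 5].
    winsorized = _sketch_row_winsorize(row, 0.33, 0.67)
    # Two quantile buckets on an increasing row split it into halves,
    # matching the '2' expectation in test_quantiles_unmasked.
    buckets = _pd.qcut(row, 2, labels=False)
    return winsorized, buckets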
class ShortReprTestCase(TestCase):
"""
Tests for short_repr methods of Factors.
"""
def test_demean(self):
r = F().demean().short_repr()
self.assertEqual(r, "GroupedRowTransform('demean')")
def test_zscore(self):
r = F().zscore().short_repr()
self.assertEqual(r, "GroupedRowTransform('zscore')")
def test_winsorize(self):
r = F().winsorize(min_percentile=.05, max_percentile=.95).short_repr()
self.assertEqual(r, "GroupedRowTransform('winsorize')")
class TestWindowSafety(TestCase):
def test_zscore_is_window_safe(self):
self.assertTrue(F().zscore().window_safe)
def test_demean_is_window_safe_if_input_is_window_safe(self):
self.assertFalse(F().demean().window_safe)
self.assertFalse(F(window_safe=False).demean().window_safe)
self.assertTrue(F(window_safe=True).demean().window_safe)
def test_winsorize_is_window_safe_if_input_is_window_safe(self):
self.assertFalse(
F().winsorize(min_percentile=.05, max_percentile=.95).window_safe
)
self.assertFalse(
F(window_safe=False).winsorize(
min_percentile=.05,
max_percentile=.95
).window_safe
)
self.assertTrue(
F(window_safe=True).winsorize(
min_percentile=.05,
max_percentile=.95
).window_safe
)
class TestPostProcessAndToWorkSpaceValue(ZiplineTestCase):
@parameter_space(dtype_=(float64_dtype, datetime64ns_dtype))
def test_reversability(self, dtype_):
class F(Factor):
inputs = ()
dtype = dtype_
window_length = 0
f = F()
column_data = array(
[[0, f.missing_value],
[1, f.missing_value],
[2, 3]],
dtype=dtype_,
)
assert_equal(f.postprocess(column_data.ravel()), column_data.ravel())
# only include the non-missing data
pipeline_output = pd.Series(
data=array([0, 1, 2, 3], dtype=dtype_),
index=pd.MultiIndex.from_arrays([
[pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
pd.Timestamp('2014-01-03')],
[0, 0, 0, 1],
]),
)
assert_equal(
f.to_workspace_value(pipeline_output, pd.Index([0, 1])),
column_data,
)
| apache-2.0 |
airanmehr/bio | Scripts/TimeSeriesPaper/Plot/NullDistribution.py | 1 | 2314 | '''
Copyleft Jun 08, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import pylab as plt;
import matplotlib as mpl
import os;
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
import seaborn as sns
def lap(x=np.linspace(-0.3,0.3,1000),scale=1): return pd.Series(np.exp(-abs(x-0)/scale)/(2.*scale),index=x)
def plotNullDistributionS(reg,take_log=True):
if reg:
a=pd.read_pickle(utl.outpath +'real/negativeControl.Simulations.maxLikelihoods.regularized.df')
plt.figure(figsize=(8,6),dpi=200);sns.distplot(a.s,bins=95,kde=False,norm_hist=True)
rang=[-0.3,0.3]
sns.kdeplot(a.s,bw=0.007,color='r',linestyle='--')
else:
a=pd.read_pickle(utl.outpath +'real/negativeControl.Simulations.maxLikelihoods.df');a.loc[a.s==-0.5,'s']=None;a=a.dropna()
plt.figure(figsize=(8,6),dpi=200);sns.distplot(a.s,bins=20,kde=False,norm_hist=True);
sns.kdeplot(a.s,bw=0.01,color='r',linestyle='--')
rang=[-0.4,0.4]
plt.legend(['Empirical','Histogram'])
plt.xlim(rang);
plt.ylabel('$P(s)$')
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size':50}) ;
mpl.rc('text', usetex=True)
if take_log: plt.gca().set_yscale("log");plt.ylabel(r'$\log(P(s))$')
plt.xlabel('$s$')
plt.savefig(utl.paperFiguresPath + 'nulls{}{}.pdf'.format(('', '.reg')[reg], ('', '.log')[take_log]))
plt.show()
def plotNullDistributionLR():
a=pd.read_pickle(utl.outpath +'real/negativeControl.Simulations.maxLikelihoods.regularized.df')
a['p']=a.s.abs()*(a.alt-a.null)
plt.figure(figsize=(8,6),dpi=200)
sns.distplot(a.p,bins=11,norm_hist=True,kde=False);plt.gca().set_yscale("log")
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size':50}) ;
mpl.rc('text', usetex=True)
plt.savefig(utl.paperFiguresPath + 'nullpred.pdf')
plt.show()
def plotS():
plotNullDistributionS(False)
plotNullDistributionS(False,False)
plotNullDistributionS(True)
plotNullDistributionS(True,False)
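# Illustrative sketch (assumption: not part of the original analysis script):
# `lap` above is the Laplace density exp(-|x|/scale) / (2 * scale); this
# helper checks numerically that it integrates to ~1 over a wide grid.  The
# function name and the example scale are arbitrary.
def checkLaplaceNormalization(scale=0.05):
    grid = np.linspace(-20 * scale, 20 * scale, 100001)
    density = lap(x=grid, scale=scale)
    return np.trapz(density.values, grid)  # ~1 up to truncation error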
if __name__ == '__main__':
plotNullDistributionLR()
| mit |
victorbergelin/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
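# ---------------------------------------------------------------------------
# Illustrative sketch (not an official scikit-learn example; toy sizes and
# seed are arbitrary): a single call to `c_step` on synthetic contaminated
# Gaussian data.  It shows the five values one C-step run returns: location,
# covariance, log-determinant of the covariance, the support mask and the
# Mahalanobis distances of all samples.
def _c_step_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(100, 2)
    X[:10] += 10.  # a handful of gross outliers
    n_support = (X.shape[0] + X.shape[1] + 1) // 2
    location, covariance, det, support, dist = c_step(
        X, n_support, random_state=rng)
    return location, covariance, support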
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates and random subsets before
pooling them into a larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): shrink the number of retained candidates
            # before allocating again.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
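# ---------------------------------------------------------------------------
# Illustrative sketch (not an official scikit-learn example; sizes and seed
# are arbitrary): `fast_mcd` returns the *raw* robust estimates, i.e. without
# the consistency correction and re-weighting that MinCovDet below applies.
def _fast_mcd_sketch(seed=42):
    rng = np.random.RandomState(seed)
    X = rng.randn(200, 3)
    X[:20] += 8.  # contaminate 10% of the samples with a shifted cluster
    location, covariance, support, dist = fast_mcd(X, random_state=seed)
    # `support` flags the observations used for the raw estimates.
    return location, covariance, support.sum()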
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
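# ---------------------------------------------------------------------------
# Illustrative sketch (a minimal usage example, not shipped documentation;
# data sizes, contamination level and seed are arbitrary): fit MinCovDet on
# contaminated Gaussian data and compare it with the plain empirical
# estimate, which the outliers inflate.
def _min_cov_det_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.multivariate_normal(mean=[0., 0.],
                                cov=[[1., .7], [.7, 1.]],
                                size=300)
    X[:30] += 6.  # 10% outliers
    robust = MinCovDet(random_state=seed).fit(X)
    empirical = EmpiricalCovariance().fit(X)
    # `robust.dist_` holds squared Mahalanobis distances under the robust
    # fit, which can be thresholded to flag the outlying rows.
    return robust.covariance_, empirical.covariance_, robust.dist_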
| bsd-3-clause |
ndingwall/scikit-learn | examples/classification/plot_classifier_comparison.py | 34 | 5239 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1, max_iter=1000),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# Plot the testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# Plot the testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
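# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original example): the per-classifier
# surface evaluation used in the loop above, factored into a small helper.
# It assumes a fitted two-feature classifier and returns arrays ready for
# contourf, preferring decision_function and falling back to predict_proba
# exactly as the loop does.
def decision_surface(clf, X, h=.02):
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    if hasattr(clf, "decision_function"):
        Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    else:
        Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
    return xx, yy, Z.reshape(xx.shape)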
| bsd-3-clause |
C2SM-RCM/serialbox | python/serialbox/Visualizer.py | 1 | 4181 | # This file is released under terms of BSD license`
# See LICENSE.txt for more information
"""Visualizer module used for visualization of serialized data."""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.widgets import Slider, CheckButtons
from matplotlib.colors import LogNorm, SymLogNorm, Normalize
class Visualizer:
def __init__(self, field, fieldname, halospec=None):
"""Initializes a visualization instance, that is a windows with a field
field is a 3D numpy array
fieldname is a string with the name of the field
halospec is a 2x2 array with the definition of the halo size
After this call the window is shown
"""
self.field = field
self.fieldname = fieldname
# Register halo information
if halospec is None:
halospec = [[3, 3], [3, 3]]
self.istart = halospec[0][0]
self.iend = field.shape[0] - halospec[0][1]
self.jstart = halospec[1][0]
self.jend = field.shape[1] - halospec[1][1]
self.plotHalo = True
self.plotLogLog = False
self.curklevel = 0
self.figure = plt.figure()
# Slider
slideraxes = plt.axes([0.15, 0.02, 0.5, 0.03], axisbg='lightgoldenrodyellow')
self.slider = Slider(slideraxes, 'K level', 0, field.shape[2]-1, valinit=0)
self.slider.valfmt = '%2d'
self.slider.set_val(0)
self.slider.on_changed(self.update_slider)
# CheckButton
self.cbaxes = plt.axes([0.8, -.04, 0.12, 0.15])
self.cbaxes.set_axis_off()
self.cb = CheckButtons(self.cbaxes, ('Halo', 'Logscale'), (self.plotHalo, self.plotLogLog))
self.cb.on_clicked(self.update_button)
# Initial plot
self.fieldaxes = self.figure.add_axes([0.1, 0.15, 0.9, 0.75])
self.collection = plt.pcolor(self._get_field(), axes=self.fieldaxes)
self.colorbar = plt.colorbar()
self.fieldaxes.set_xlim(right=self._get_field().shape[1])
self.fieldaxes.set_ylim(top=self._get_field().shape[0])
plt.xlabel('i')
plt.ylabel('j')
self.title = plt.title('%s - Level 0' % (fieldname,))
plt.show(block=False)
def update_slider(self, val):
if val == self.curklevel:
return
self.curklevel = round(val)
self.title.set_text('%s - Level %d' % (self.fieldname, self.curklevel))
# Draw new field level
field = self._get_field()
size = field.shape[0] * field.shape[1]
array = field.reshape(size)
self.collection.set_array(array)
self.colorbar.set_clim(vmin=field.min(), vmax=field.max())
self.collection.set_clim(vmin=field.min(), vmax=field.max())
self.colorbar.update_normal(self.collection)
self.figure.canvas.draw_idle()
def update_button(self, label):
if label == 'Halo':
self.plotHalo = not self.plotHalo
if label == 'Logscale':
self.plotLogLog = not self.plotLogLog
self.update_plot()
def update_plot(self):
# Redraw field
self.collection.remove()
field = self._get_field()
if (self.plotLogLog):
minvalue = field.min()
norm = SymLogNorm(linthresh=1e-10)
self.collection = plt.pcolor(field, axes=self.fieldaxes,
norm=norm)
self.colorbar.set_clim(vmin=minvalue, vmax=field.max())
else:
self.collection = plt.pcolor(field, axes=self.fieldaxes)
self.colorbar.set_clim(vmin=field.min(), vmax=field.max())
self.colorbar.set_norm(norm=Normalize(vmin=field.min(), vmax=field.max()))
self.fieldaxes.set_xlim(right=field.shape[1])
self.fieldaxes.set_ylim(top=field.shape[0])
self.colorbar.update_normal(self.collection)
self.figure.canvas.draw_idle()
def _get_field(self):
if self.plotHalo:
return np.rot90(self.field[:, :, self.curklevel])
else:
return np.rot90(self.field[self.istart:self.iend, self.jstart:self.jend, self.curklevel])
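# ---------------------------------------------------------------------------
# Illustrative sketch (assumed usage, not part of Serialbox itself): the
# constructor documented above expects a 3D numpy field, a field name and an
# optional 2x2 halo specification; a random field is enough to open the
# interactive window.  Shapes and names here are arbitrary.
def _visualizer_sketch():
    field = np.random.rand(20, 20, 5)  # (i, j, k) toy field
    return Visualizer(field, 'toy_field', halospec=[[3, 3], [3, 3]])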
| bsd-2-clause |
imbasimba/astroquery | astroquery/alma/core.py | 1 | 53277 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os.path
import keyring
import numpy as np
import re
import tarfile
import string
import requests
import warnings
from pkg_resources import resource_filename
from bs4 import BeautifulSoup
import pyvo
from six.moves.urllib_parse import urljoin
import six
from astropy.table import Table, Column, vstack
from astroquery import log
from astropy.utils import deprecated
from astropy.utils.console import ProgressBar
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy import units as u
from astropy.time import Time
from ..exceptions import LoginError
from ..utils import commons
from ..utils.process_asyncs import async_to_sync
from ..query import QueryWithLogin
from .tapsql import _gen_pos_sql, _gen_str_sql, _gen_numeric_sql,\
_gen_band_list_sql, _gen_datetime_sql, _gen_pol_sql, _gen_pub_sql,\
_gen_science_sql, _gen_spec_res_sql, ALMA_DATE_FORMAT
from . import conf, auth_urls
from astroquery.utils.commons import ASTROPY_LT_4_1
__all__ = ['AlmaClass', 'ALMA_BANDS']
__doctest_skip__ = ['AlmaClass.*']
ALMA_TAP_PATH = 'tap'
ALMA_SIA_PATH = 'sia2'
ALMA_DATALINK_PATH = 'datalink/sync'
# Map from ALMA ObsCore result to ALMA original query result
# The map is provided in order to preserve the name of the columns in the
# original ALMA query original results and make it backwards compatible
# key - current column, value - original column name
_OBSCORE_TO_ALMARESULT = {
'proposal_id': 'Project code',
'target_name': 'Source name',
's_ra': 'RA',
's_dec': 'Dec',
'gal_longitude': 'Galactic longitude',
'gal_latitude': 'Galactic latitude',
'band_list': 'Band',
's_region': 'Footprint',
'em_resolution': 'Frequency resolution',
'antenna_arrays': 'Array',
'is_mosaic': 'Mosaic',
't_exptime': 'Integration',
'obs_release_date': 'Release date',
'frequency_support': 'Frequency support',
'velocity_resolution': 'Velocity resolution',
'pol_states': 'Pol products',
't_min': 'Observation date',
'obs_creator_name': 'PI name',
'schedblock_name': 'SB name',
'proposal_authors': 'Proposal authors',
'sensitivity_10kms': 'Line sensitivity (10 km/s)',
'cont_sensitivity_bandwidth': 'Continuum sensitivity',
'pwv': 'PWV',
'group_ous_uid': 'Group ous id',
'member_ous_uid': 'Member ous id',
'asdm_uid': 'Asdm uid',
'obs_title': 'Project title',
'type': 'Project type',
'scan_intent': 'Scan intent',
's_fov': 'Field of view',
'spatial_scale_max': 'Largest angular scale',
'qa2_passed': 'QA2 Status',
# TODO COUNT
'science_keyword': 'Science keyword',
'scientific_category': 'Scientific category'
}
ALMA_BANDS = {
'3': (84*u.GHz, 116*u.GHz),
'4': (125*u.GHz, 163*u.GHz),
'5': (163*u.GHz, 211*u.GHz),
'6': (211*u.GHz, 275*u.GHz),
'7': (275*u.GHz, 373*u.GHz),
'8': (385*u.GHz, 500*u.GHz),
'9': (602*u.GHz, 720*u.GHz),
'10': (787*u.GHz, 950*u.GHz)
}
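# Illustrative sketch (helper not present in astroquery itself): ALMA_BANDS
# maps a band label to its (low, high) frequency range, so finding the bands
# that cover a given frequency is a containment test over the dict.
def _bands_covering(frequency):
    """Return the ALMA band labels whose range contains `frequency`."""
    return [band for band, (f_lo, f_hi) in ALMA_BANDS.items()
            if f_lo <= frequency <= f_hi]
# e.g. _bands_covering(230 * u.GHz) would yield ['6'] (band 6: 211-275 GHz).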
ALMA_FORM_KEYS = {
'Position': {
'Source name (astropy Resolver)': ['source_name_resolver',
'SkyCoord.from_name', _gen_pos_sql],
'Source name (ALMA)': ['source_name_alma', 'target_name', _gen_str_sql],
'RA Dec (Sexagesimal)': ['ra_dec', 's_ra, s_dec', _gen_pos_sql],
'Galactic (Degrees)': ['galactic', 'gal_longitude, gal_latitude',
_gen_pos_sql],
'Angular resolution (arcsec)': ['spatial_resolution',
'spatial_resolution', _gen_numeric_sql],
'Largest angular scale (arcsec)': ['spatial_scale_max',
'spatial_scale_max', _gen_numeric_sql],
'Field of view (arcsec)': ['fov', 's_fov', _gen_numeric_sql]
},
'Energy': {
'Frequency (GHz)': ['frequency', 'frequency', _gen_numeric_sql],
'Bandwidth (GHz)': ['bandwidth', 'bandwidth', _gen_numeric_sql],
'Spectral resolution (KHz)': ['spectral_resolution',
'em_resolution', _gen_spec_res_sql],
'Band': ['band_list', 'band_list', _gen_band_list_sql]
},
'Time': {
'Observation date': ['start_date', 't_min', _gen_datetime_sql],
'Integration time (s)': ['integration_time', 't_exptime',
_gen_numeric_sql]
},
'Polarization': {
'Polarisation type (Single, Dual, Full)': ['polarisation_type',
'pol_states', _gen_pol_sql]
},
'Observation': {
'Line sensitivity (10 km/s) (mJy/beam)': ['line_sensitivity',
'sensitivity_10kms',
_gen_numeric_sql],
'Continuum sensitivity (mJy/beam)': ['continuum_sensitivity',
'cont_sensitivity_bandwidth',
_gen_numeric_sql],
        'Water vapour (mm)': ['water_vapour', 'pwv', _gen_numeric_sql]
},
'Project': {
'Project code': ['project_code', 'proposal_id', _gen_str_sql],
'Project title': ['project_title', 'obs_title', _gen_str_sql],
'PI name': ['pi_name', 'obs_creator_name', _gen_str_sql],
'Proposal authors': ['proposal_authors', 'proposal_authors', _gen_str_sql],
'Project abstract': ['project_abstract', 'proposal_abstract', _gen_str_sql],
'Publication count': ['publication_count', 'NA', _gen_str_sql],
'Science keyword': ['science_keyword', 'science_keyword', _gen_str_sql]
},
'Publication': {
'Bibcode': ['bibcode', 'bib_reference', _gen_str_sql],
'Title': ['pub_title', 'pub_title', _gen_str_sql],
'First author': ['first_author', 'first_author', _gen_str_sql],
'Authors': ['authors', 'authors', _gen_str_sql],
'Abstract': ['pub_abstract', 'pub_abstract', _gen_str_sql],
'Year': ['publication_year', 'pub_year', _gen_numeric_sql]
},
'Options': {
'Public data only': ['public_data', 'data_rights', _gen_pub_sql],
'Science observations only': ['science_observation',
'science_observation', _gen_science_sql]
}
}
def _gen_sql(payload):
sql = 'select * from ivoa.obscore'
where = ''
if payload:
for constraint in payload:
for attrib_category in ALMA_FORM_KEYS.values():
for attrib in attrib_category.values():
if constraint in attrib:
# use the value and the second entry in attrib which
# is the new name of the column
val = payload[constraint]
if constraint == 'em_resolution':
# em_resolution does not require any transformation
attrib_where = _gen_numeric_sql(constraint, val)
else:
attrib_where = attrib[2](attrib[1], val)
if attrib_where:
if where:
where += ' AND '
else:
where = ' WHERE '
where += attrib_where
return sql + where
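# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): `_gen_sql` maps the payload
# keys declared in ALMA_FORM_KEYS onto an ADQL statement against
# ivoa.obscore, ANDing one WHERE fragment per recognised constraint.  The
# payload values below are arbitrary examples.
def _gen_sql_sketch():
    payload = {'band_list': '6',
               'pi_name': 'Smith',
               'science_observation': True}
    # Returns 'select * from ivoa.obscore WHERE ...' with three fragments
    # (band_list, obs_creator_name, science_observation) joined by AND.
    return _gen_sql(payload)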
@async_to_sync
class AlmaClass(QueryWithLogin):
TIMEOUT = conf.timeout
archive_url = conf.archive_url
USERNAME = conf.username
def __init__(self):
# sia service does not need disambiguation but tap does
super(AlmaClass, self).__init__()
self._sia = None
self._tap = None
self._datalink = None
self.sia_url = None
self.tap_url = None
self.datalink_url = None
@property
def datalink(self):
if not self._datalink:
base_url = self._get_dataarchive_url()
if base_url.endswith('/'):
self.datalink_url = base_url + ALMA_DATALINK_PATH
else:
self.datalink_url = base_url + '/' + ALMA_DATALINK_PATH
self._datalink = pyvo.dal.adhoc.DatalinkService(
baseurl=self.datalink_url)
return self._datalink
@property
def sia(self):
if not self._sia:
base_url = self._get_dataarchive_url()
if base_url.endswith('/'):
self.sia_url = base_url + ALMA_SIA_PATH
else:
self.sia_url = base_url + '/' + ALMA_SIA_PATH
self._sia = pyvo.dal.sia2.SIAService(baseurl=self.sia_url)
return self._sia
@property
def tap(self):
if not self._tap:
base_url = self._get_dataarchive_url()
if base_url.endswith('/'):
self.tap_url = base_url + ALMA_TAP_PATH
else:
self.tap_url = base_url + '/' + ALMA_TAP_PATH
self._tap = pyvo.dal.tap.TAPService(baseurl=self.tap_url)
return self._tap
def query_object_async(self, object_name, cache=None, public=True,
science=True, payload=None, **kwargs):
"""
Query the archive for a source name.
Parameters
----------
object_name : str
The object name. Will be resolved by astropy.coord.SkyCoord
cache : deprecated
public : bool
True to return only public datasets, False to return private only,
None to return both
science : bool
True to return only science datasets, False to return only
calibration, None to return both
payload : dict
Dictionary of additional keywords. See `help`.
"""
if payload is not None:
payload['source_name_resolver'] = object_name
else:
payload = {'source_name_resolver': object_name}
return self.query_async(public=public, science=science,
payload=payload, **kwargs)
def query_region_async(self, coordinate, radius, cache=None, public=True,
science=True, payload=None, **kwargs):
"""
Query the ALMA archive with a source name and radius
Parameters
----------
coordinates : str / `astropy.coordinates`
the identifier or coordinates around which to query.
radius : str / `~astropy.units.Quantity`, optional
the radius of the region
cache : Deprecated
Cache the query?
public : bool
True to return only public datasets, False to return private only,
None to return both
science : bool
True to return only science datasets, False to return only
calibration, None to return both
payload : dict
Dictionary of additional keywords. See `help`.
"""
rad = radius
if not isinstance(radius, u.Quantity):
rad = radius*u.deg
obj_coord = commons.parse_coordinates(coordinate).icrs
ra_dec = '{}, {}'.format(obj_coord.to_string(), rad.to(u.deg).value)
if payload is None:
payload = {}
if 'ra_dec' in payload:
payload['ra_dec'] += ' | {}'.format(ra_dec)
else:
payload['ra_dec'] = ra_dec
return self.query_async(public=public, science=science,
payload=payload, **kwargs)
def query_async(self, payload, cache=None, public=True, science=True,
legacy_columns=False, max_retries=None,
get_html_version=None,
get_query_payload=None, **kwargs):
"""
Perform a generic query with user-specified payload
Parameters
----------
payload : dictionary
Please consult the `help` method
cache : deprecated
public : bool
True to return only public datasets, False to return private only,
None to return both
science : bool
True to return only science datasets, False to return only
calibration, None to return both
legacy_columns : bool
True to return the columns from the obsolete ALMA advanced query,
otherwise return the current columns based on ObsCore model.
Returns
-------
Table with results. Columns are those in the ALMA ObsCore model
(see ``help_tap``) unless ``legacy_columns`` argument is set to True.
"""
local_args = dict(locals().items())
        for arg in local_args.items():
# check if the deprecated attributes have been used
for dep in ['cache', 'max_retries', 'get_html_version']:
if arg[0] == dep and arg[1] is not None:
warnings.warn(
("Argument '{}' has been deprecated "
"since version 4.0.1 and will be ignored".format(arg[0])),
AstropyDeprecationWarning)
                    kwargs.pop(arg[0], None)
if payload is None:
payload = {}
for arg in kwargs:
value = kwargs[arg]
if 'band_list' == arg and isinstance(value, list):
value = ' '.join([str(_) for _ in value])
if arg in payload:
payload[arg] = '{} {}'.format(payload[arg], value)
else:
payload[arg] = value
if science is not None:
payload['science_observation'] = science
if public is not None:
payload['public_data'] = public
if get_query_payload:
return payload
query = _gen_sql(payload)
result = self.query_tap(query, maxrec=payload.get('maxrec', None))
if result is not None:
result = result.to_table()
else:
# Should not happen
raise RuntimeError('BUG: Unexpected result None')
if legacy_columns:
legacy_result = Table()
# add 'Observation date' column
for col_name in _OBSCORE_TO_ALMARESULT:
if col_name in result.columns:
if col_name == 't_min':
legacy_result['Observation date'] = \
[Time(_['t_min'], format='mjd').strftime(
ALMA_DATE_FORMAT) for _ in result]
else:
legacy_result[_OBSCORE_TO_ALMARESULT[col_name]] = \
result[col_name]
else:
log.error("Invalid column mapping in OBSCORE_TO_ALMARESULT: "
"{}:{}. Please "
"report this as an Issue."
.format(col_name, _OBSCORE_TO_ALMARESULT[col_name]))
return legacy_result
return result
def query_sia(self, pos=None, band=None, time=None, pol=None,
field_of_view=None, spatial_resolution=None,
spectral_resolving_power=None, exptime=None,
timeres=None, publisher_did=None,
facility=None, collection=None,
instrument=None, data_type=None,
calib_level=None, target_name=None,
res_format=None, maxrec=None,
**kwargs):
"""
Use standard SIA2 attributes to query the ALMA SIA service.
Parameters
----------
Returns
-------
Results in `pyvo.dal.SIAResults` format.
result.table in Astropy table format
"""
return self.sia.search(
pos=pos,
band=band,
time=time,
pol=pol,
field_of_view=field_of_view,
spatial_resolution=spatial_resolution,
spectral_resolving_power=spectral_resolving_power,
exptime=exptime,
timeres=timeres,
publisher_did=publisher_did,
facility=facility,
collection=collection,
instrument=instrument,
data_type=data_type,
calib_level=calib_level,
target_name=target_name,
res_format=res_format,
maxrec=maxrec,
**kwargs)
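    # Hedged example of a SIA2 cone search.  The (ra_deg, dec_deg, radius_deg)
    # tuple form for ``pos`` follows the pyvo SIA2 conventions; the coordinate
    # values themselves are placeholders.
    #
    #     results = Alma.query_sia(pos=(201.365, -43.019, 0.1), calib_level=2)
    #     table = results.table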
def query_tap(self, query, maxrec=None):
"""
Send query to the ALMA TAP. Results in pyvo.dal.TapResult format.
result.table in Astropy table format
Parameters
----------
maxrec : int
maximum number of records to return
"""
log.debug('TAP query: {}'.format(query))
return self.tap.search(query, language='ADQL', maxrec=maxrec)
def help_tap(self):
print('Table to query is "voa.ObsCore".')
print('For example: "select top 1 * from ivoa.ObsCore"')
print('The scheme of the table is as follows.\n')
print(' {0:20s} {1:15s} {2:10} {3}'.
format('Name', 'Type', 'Unit', 'Description'))
print('-'*90)
for tb in self.tap.tables.items():
if tb[0] == 'ivoa.obscore':
for col in tb[1].columns:
if col.datatype.content == 'char':
type = 'char({})'.format(col.datatype.arraysize)
else:
type = str(col.datatype.content)
unit = col.unit if col.unit else ''
print(' {0:20s} {1:15s} {2:10} {3}'.
format(col.name, type, unit, col.description))
# update method pydocs
query_region_async.__doc__ = query_region_async.__doc__.replace(
'_SIA2_PARAMETERS', pyvo.dal.sia2.SIA_PARAMETERS_DESC)
query_object_async.__doc__ = query_object_async.__doc__.replace(
'_SIA2_PARAMETERS', pyvo.dal.sia2.SIA_PARAMETERS_DESC)
query_async.__doc__ = query_async.__doc__.replace(
'_SIA2_PARAMETERS', pyvo.dal.sia2.SIA_PARAMETERS_DESC)
def _get_dataarchive_url(self):
"""
If the generic ALMA URL is used, query it to determine which mirror to
access for querying data
"""
if not hasattr(self, 'dataarchive_url'):
if self.archive_url in ('http://almascience.org', 'https://almascience.org'):
response = self._request('GET', self.archive_url,
cache=False)
response.raise_for_status()
# Jan 2017: we have to force https because the archive doesn't
# tell us it needs https.
self.dataarchive_url = response.url.replace(
"/asax/", "").replace("/aq/", "").replace("http://", "https://")
else:
self.dataarchive_url = self.archive_url
elif self.dataarchive_url in ('http://almascience.org',
'https://almascience.org'):
raise ValueError("'dataarchive_url' was set to a disambiguation "
"page that is meant to redirect to a real "
"archive. You should only reach this message "
"if you manually specified Alma.dataarchive_url. "
"If you did so, instead consider setting "
"Alma.archive_url. Otherwise, report an error "
"on github.")
return self.dataarchive_url
@deprecated(since="v0.4.1", alternative="get_data_info")
def stage_data(self, uids, expand_tarfiles=False, return_json=False):
"""
Obtain table of ALMA files
DEPRECATED: Data is no longer staged. This method is deprecated and
kept here for backwards compatibility reasons but it's not fully
compatible with the original implementation.
Parameters
----------
uids : list or str
A list of valid UIDs or a single UID.
UIDs should have the form: 'uid://A002/X391d0b/X7b'
expand_tarfiles : DEPRECATED
return_json : DEPRECATED
Note: The returned astropy table can be easily converted to json
through pandas:
output = StringIO()
stage_data(...).to_pandas().to_json(output)
table_json = output.getvalue()
Returns
-------
data_file_table : Table
A table containing 3 columns: the UID, the file URL (for future
downloading), and the file size
"""
if return_json:
raise AttributeError(
'return_json is deprecated. See method docs for a workaround')
table = Table()
res = self.get_data_info(uids, expand_tarfiles=expand_tarfiles)
p = re.compile(r'.*(uid__.*)\.asdm.*')
if res:
table['name'] = [u.split('/')[-1] for u in res['access_url']]
table['id'] = [p.search(x).group(1) if 'asdm' in x else 'None'
for x in table['name']]
table['type'] = res['content_type']
table['size'] = res['content_length']
table['permission'] = ['UNKNOWN'] * len(res)
table['mous_uid'] = [uids] * len(res)
table['URL'] = res['access_url']
table['isProprietary'] = res['readable']
return table
def get_data_info(self, uids, expand_tarfiles=False,
with_auxiliary=True, with_rawdata=True):
"""
Return information about the data associated with ALMA uid(s)
Parameters
----------
uids: list or str
A list of valid UIDs or a single UID.
UIDs should have the form: 'uid://A002/X391d0b/X7b'
expand_tarfiles: bool
False to return information on the tarfiles packages containing
the data or True to return information about individual files in
these packages
with_auxiliary: bool
True to include the auxiliary packages, False otherwise
with_rawdata: bool
True to include raw data, False otherwise
Returns
-------
Table with results or None. Table has the following columns: id (UID),
access_url (URL to access data), content_length, content_type (MIME
type), semantics, description (optional), error_message (optional)
"""
if uids is None:
raise AttributeError('UIDs required')
if isinstance(uids, six.string_types + (np.bytes_,)):
uids = [uids]
if not isinstance(uids, (list, tuple, np.ndarray)):
raise TypeError("Datasets must be given as a list of strings.")
# TODO remove this loop and send uids at once when pyvo fixed
result = None
for uid in uids:
res = self.datalink.run_sync(uid)
if res.status[0] != 'OK':
raise Exception('ERROR {}: {}'.format(res.status[0],
res.status[1]))
temp = res.table
if ASTROPY_LT_4_1:
# very annoying
for col in [x for x in temp.colnames
if x not in ['content_length', 'readable']]:
temp[col] = temp[col].astype(str)
result = temp if result is None else vstack([result, temp])
to_delete = []
for index, rr in enumerate(result):
if rr['error_message'] is not None and \
rr['error_message'].strip():
log.warning('Error accessing info about file {}: {}'.
format(rr['access_url'], rr['error_message']))
# delete from results. Good thing to do?
to_delete.append(index)
result.remove_rows(to_delete)
if not with_auxiliary:
result = result[np.core.defchararray.find(
result['semantics'], '#aux') == -1]
if not with_rawdata:
result = result[np.core.defchararray.find(
result['semantics'], '#progenitor') == -1]
# primary data delivery type is files packaged in tarballs. However
# some type of data has an alternative way to retrieve each individual
# file as an alternative (semantics='#datalink' and
# 'content_type=application/x-votable+xml;content=datalink'). They also
# require an extra call to the datalink service to get the list of
# files.
DATALINK_FILE_TYPE = 'application/x-votable+xml;content=datalink'
DATALINK_SEMANTICS = '#datalink'
if expand_tarfiles:
# identify the tarballs that can be expandable and replace them
# with the list of components
expanded_result = None
to_delete = []
for index, row in enumerate(result):
if DATALINK_SEMANTICS in row['semantics'] and \
row['content_type'] == DATALINK_FILE_TYPE:
# subsequent call to datalink
file_id = row['access_url'].split('ID=')[1]
expanded_tar = self.get_data_info(file_id)
expanded_tar = expanded_tar[
expanded_tar['semantics'] != '#cutout']
if not expanded_result:
expanded_result = expanded_tar
else:
expanded_result = vstack(
[expanded_result, expanded_tar], join_type='exact')
to_delete.append(index)
# cleanup
result.remove_rows(to_delete)
# add the extra rows
if expanded_result:
result = vstack([result, expanded_result], join_type='exact')
else:
result = result[np.logical_or(np.core.defchararray.find(
result['semantics'].astype(str), DATALINK_SEMANTICS) == -1,
result['content_type'].astype(str) != DATALINK_FILE_TYPE)]
return result
def is_proprietary(self, uid):
"""
Given an ALMA UID, query the servers to determine whether it is
proprietary or not.
"""
query = "select distinct data_rights from ivoa.obscore where " \
"obs_id='{}'".format(uid)
result = self.query_tap(query)
if not result or len(result.table) == 0:
raise AttributeError('{} not found'.format(uid))
if len(result.table) == 1 and result.table[0][0] == 'Public':
return False
return True
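    # Example (UID as used in the docstrings above; the return value depends on
    # the archive state at query time):
    #
    #     Alma.is_proprietary('uid://A002/X391d0b/X7b')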
def _HEADER_data_size(self, files):
"""
Given a list of file URLs, return the data size. This is useful for
assessing how much data you might be downloading!
(This is discouraged by the ALMA archive, as it puts unnecessary load
on their system)
"""
totalsize = 0 * u.B
data_sizes = {}
pb = ProgressBar(len(files))
for index, fileLink in enumerate(files):
response = self._request('HEAD', fileLink, stream=False,
cache=False, timeout=self.TIMEOUT)
filesize = (int(response.headers['content-length']) * u.B).to(u.GB)
totalsize += filesize
data_sizes[fileLink] = filesize
log.debug("File {0}: size {1}".format(fileLink, filesize))
pb.update(index + 1)
response.raise_for_status()
return data_sizes, totalsize.to(u.GB)
def download_files(self, files, savedir=None, cache=True,
continuation=True, skip_unauthorized=True):
"""
Given a list of file URLs, download them
Note: Given a list with repeated URLs, each will only be downloaded
once, so the return may have a different length than the input list
Parameters
----------
files : list
List of URLs to download
savedir : None or str
The directory to save to. Default is the cache location.
cache : bool
Cache the download?
continuation : bool
Attempt to continue where the download left off (if it was broken)
skip_unauthorized : bool
If you receive "unauthorized" responses for some of the download
requests, skip over them. If this is False, an exception will be
raised.
"""
if self.USERNAME:
auth = self._get_auth_info(self.USERNAME)
else:
auth = None
downloaded_files = []
if savedir is None:
savedir = self.cache_location
for fileLink in unique(files):
log.debug("Downloading {0} to {1}".format(fileLink, savedir))
try:
check_filename = self._request('HEAD', fileLink, auth=auth,
stream=True)
check_filename.raise_for_status()
except requests.HTTPError as ex:
if ex.response.status_code == 401:
if skip_unauthorized:
log.info("Access denied to {url}. Skipping to"
" next file".format(url=fileLink))
continue
else:
raise(ex)
if 'text/html' in check_filename.headers['Content-Type']:
raise ValueError("Bad query. This can happen if you "
"attempt to download proprietary "
"data when not logged in")
try:
filename = re.search("filename=(.*)",
check_filename.headers['Content-Disposition']).groups()[0]
except KeyError:
log.info(f"Unable to find filename for {fileLink} "
"(missing Content-Disposition in header). "
"Skipping to next file.")
continue
if savedir is not None:
filename = os.path.join(savedir,
filename)
try:
self._download_file(fileLink,
filename,
timeout=self.TIMEOUT,
auth=auth,
cache=cache,
method='GET',
head_safe=True,
continuation=continuation)
downloaded_files.append(filename)
except requests.HTTPError as ex:
if ex.response.status_code == 401:
if skip_unauthorized:
log.info("Access denied to {url}. Skipping to"
" next file".format(url=fileLink))
continue
else:
raise(ex)
elif ex.response.status_code == 403:
log.error("Access denied to {url}".format(url=fileLink))
if 'dataPortal' in fileLink and 'sso' not in fileLink:
log.error("The URL may be incorrect. Try using "
"{0} instead of {1}"
.format(fileLink.replace('dataPortal/',
'dataPortal/sso/'),
fileLink))
raise ex
elif ex.response.status_code == 500:
# empirically, this works the second time most of the time...
self._download_file(fileLink,
filename,
timeout=self.TIMEOUT,
auth=auth,
cache=cache,
method='GET',
head_safe=True,
continuation=continuation)
downloaded_files.append(filename)
else:
raise ex
return downloaded_files
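    # Minimal end-to-end sketch combining ``get_data_info`` and
    # ``download_files`` (assumes a public UID; the UID below is the placeholder
    # used throughout these docstrings):
    #
    #     info = Alma.get_data_info('uid://A002/X391d0b/X7b')
    #     downloaded = Alma.download_files(info['access_url'], savedir='/tmp')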
def _parse_result(self, response, verbose=False):
"""
Parse a VOtable response
"""
if not verbose:
commons.suppress_vo_warnings()
return response
def retrieve_data_from_uid(self, uids, cache=True):
"""
Stage & Download ALMA data. Will print out the expected file size
before attempting the download.
Parameters
----------
uids : list or str
A list of valid UIDs or a single UID.
UIDs should have the form: 'uid://A002/X391d0b/X7b'
cache : bool
Whether to cache the downloads.
Returns
-------
downloaded_files : list
A list of the downloaded file paths
"""
if isinstance(uids, six.string_types + (np.bytes_,)):
uids = [uids]
if not isinstance(uids, (list, tuple, np.ndarray)):
raise TypeError("Datasets must be given as a list of strings.")
files = self.get_data_info(uids)
file_urls = files['access_url']
totalsize = files['content_length'].sum()*u.B
# each_size, totalsize = self.data_size(files)
log.info("Downloading files of size {0}...".format(totalsize.to(u.GB)))
# TODO: Add cache=cache keyword here. Currently would have no effect.
downloaded_files = self.download_files(file_urls)
return downloaded_files
def _get_auth_info(self, username, store_password=False,
reenter_password=False):
"""
Get the auth info (user, password) for use in another function
"""
if username is None:
if not self.USERNAME:
raise LoginError("If you do not pass a username to login(), "
"you should configure a default one!")
else:
username = self.USERNAME
if hasattr(self, '_auth_url'):
auth_url = self._auth_url
else:
raise LoginError("Login with .login() to acquire the appropriate"
" login URL")
# Get password from keyring or prompt
password, password_from_keyring = self._get_password(
"astroquery:{0}".format(auth_url), username, reenter=reenter_password)
# When authenticated, save password in keyring if needed
if password_from_keyring is None and store_password:
keyring.set_password("astroquery:{0}".format(auth_url), username, password)
return username, password
def _login(self, username=None, store_password=False,
reenter_password=False, auth_urls=auth_urls):
"""
Login to the ALMA Science Portal.
Parameters
----------
username : str, optional
Username to the ALMA Science Portal. If not given, it should be
specified in the config file.
store_password : bool, optional
Stores the password securely in your keyring. Default is False.
reenter_password : bool, optional
Asks for the password even if it is already stored in the
keyring. This is the way to overwrite an already stored passwork
on the keyring. Default is False.
"""
success = False
for auth_url in auth_urls:
# set session cookies (they do not get set otherwise)
cookiesetpage = self._request("GET",
urljoin(self._get_dataarchive_url(),
'rh/forceAuthentication'),
cache=False)
self._login_cookiepage = cookiesetpage
cookiesetpage.raise_for_status()
if (auth_url+'/cas/login' in cookiesetpage.request.url):
# we've hit a target, we're good
success = True
break
if not success:
raise LoginError("Could not log in to any of the known ALMA "
"authorization portals: {0}".format(auth_urls))
# Check if already logged in
loginpage = self._request("GET", "https://{auth_url}/cas/login".format(auth_url=auth_url),
cache=False)
root = BeautifulSoup(loginpage.content, 'html5lib')
if root.find('div', class_='success'):
log.info("Already logged in.")
return True
self._auth_url = auth_url
username, password = self._get_auth_info(username=username,
store_password=store_password,
reenter_password=reenter_password)
# Authenticate
log.info("Authenticating {0} on {1} ...".format(username, auth_url))
# Do not cache pieces of the login process
data = {kw: root.find('input', {'name': kw})['value']
for kw in ('execution', '_eventId')}
data['username'] = username
data['password'] = password
data['submit'] = 'LOGIN'
login_response = self._request("POST", "https://{0}/cas/login".format(auth_url),
params={'service': self._get_dataarchive_url()},
data=data,
cache=False)
# save the login response for debugging purposes
self._login_response = login_response
# do not expose password back to user
del data['password']
# but save the parameters for debug purposes
self._login_parameters = data
authenticated = ('You have successfully logged in' in
login_response.text)
if authenticated:
log.info("Authentication successful!")
self.USERNAME = username
else:
log.exception("Authentication failed!")
return authenticated
def get_cycle0_uid_contents(self, uid):
"""
List the file contents of a UID from Cycle 0. Will raise an error
if the UID is from cycle 1+, since those data have been released in
a different and more consistent format. See
http://almascience.org/documents-and-tools/cycle-2/ALMAQA2Productsv1.01.pdf
for details.
"""
# First, check if UID is in the Cycle 0 listing
if uid in self.cycle0_table['uid']:
cycle0id = self.cycle0_table[
self.cycle0_table['uid'] == uid][0]['ID']
contents = [row['Files']
for row in self._cycle0_tarfile_content
if cycle0id in row['ID']]
return contents
else:
info_url = urljoin(
self._get_dataarchive_url(),
'documents-and-tools/cycle-2/ALMAQA2Productsv1.01.pdf')
raise ValueError("Not a Cycle 0 UID. See {0} for details about "
"cycle 1+ data release formats.".format(info_url))
@property
def _cycle0_tarfile_content(self):
"""
In principle, this is a static file, but we'll retrieve it just in case
"""
if not hasattr(self, '_cycle0_tarfile_content_table'):
url = urljoin(self._get_dataarchive_url(),
'alma-data/archive/cycle-0-tarfile-content')
response = self._request('GET', url, cache=True)
# html.parser is needed because some <tr>'s have form:
# <tr width="blah"> which the default parser does not pick up
root = BeautifulSoup(response.content, 'html.parser')
html_table = root.find('table', class_='grid listing')
data = list(zip(*[(x.findAll('td')[0].text,
x.findAll('td')[1].text)
for x in html_table.findAll('tr')]))
columns = [Column(data=data[0], name='ID'),
Column(data=data[1], name='Files')]
tbl = Table(columns)
assert len(tbl) == 8497
self._cycle0_tarfile_content_table = tbl
else:
tbl = self._cycle0_tarfile_content_table
return tbl
@property
def cycle0_table(self):
"""
Return a table of Cycle 0 Project IDs and associated UIDs.
The table is distributed with astroquery and was provided by Felix
Stoehr.
"""
if not hasattr(self, '_cycle0_table'):
filename = resource_filename(
'astroquery.alma', 'data/cycle0_delivery_asdm_mapping.txt')
self._cycle0_table = Table.read(filename, format='ascii.no_header')
self._cycle0_table.rename_column('col1', 'ID')
self._cycle0_table.rename_column('col2', 'uid')
return self._cycle0_table
def get_files_from_tarballs(self, downloaded_files, regex=r'.*\.fits$',
path='cache_path', verbose=True):
"""
Given a list of successfully downloaded tarballs, extract files
with names matching a specified regular expression. The default
is to extract all FITS files
NOTE: alma now supports direct listing and downloads of tarballs. See
``get_data_info`` and ``download_and_extract_files``
Parameters
----------
downloaded_files : list
A list of downloaded files. These should be paths on your local
machine.
regex : str
A valid regular expression
path : 'cache_path' or str
If 'cache_path', will use the astroquery.Alma cache directory
(``Alma.cache_location``), otherwise will use the specified path.
Note that the subdirectory structure of the tarball will be
maintained.
Returns
-------
filelist : list
A list of the extracted file locations on disk
"""
if path == 'cache_path':
path = self.cache_location
elif not os.path.isdir(path):
raise OSError("Specified an invalid path {0}.".format(path))
fitsre = re.compile(regex)
filelist = []
for fn in downloaded_files:
tf = tarfile.open(fn)
for member in tf.getmembers():
if fitsre.match(member.name):
if verbose:
log.info("Extracting {0} to {1}".format(member.name,
path))
tf.extract(member, path)
filelist.append(os.path.join(path, member.name))
return filelist
def download_and_extract_files(self, urls, delete=True, regex=r'.*\.fits$',
include_asdm=False, path='cache_path',
verbose=True):
"""
Given a list of tarball URLs, it extracts all the FITS files (or
whatever matches the regex)
Parameters
----------
urls : str or list
A single URL or a list of URLs
include_asdm : bool
Only affects cycle 1+ data. If set, the ASDM files will be
downloaded in addition to the script and log files. By default,
though, this file will be downloaded and deleted without extracting
any information: you must change the regex if you want to extract
data from an ASDM tarball
"""
if isinstance(urls, six.string_types):
urls = [urls]
if not isinstance(urls, (list, tuple, np.ndarray)):
raise TypeError("Datasets must be given as a list of strings.")
filere = re.compile(regex)
all_files = []
tar_files = []
expanded_files = []
for url in urls:
if url[-4:] != '.tar':
raise ValueError("URLs should be links to tarballs.")
tarfile_name = os.path.split(url)[-1]
if tarfile_name in self._cycle0_tarfile_content['ID']:
# It is a cycle 0 file: need to check if it contains FITS
match = (self._cycle0_tarfile_content['ID'] == tarfile_name)
if not any(re.match(regex, x) for x in
self._cycle0_tarfile_content['Files'][match]):
log.info("No FITS files found in {0}".format(tarfile_name))
continue
else:
if 'asdm' in tarfile_name and not include_asdm:
log.info("ASDM tarballs do not contain FITS files; "
"skipping.")
continue
tar_file = url.split('/')[-1]
files = self.get_data_info(tar_file)
if files:
expanded_files += [x for x in files['access_url'] if
filere.match(x.split('/')[-1])]
else:
tar_files.append(tar_file)
try:
# get the tar files
downloaded = self.download_files(tar_files, savedir=path)
fitsfilelist = self.get_files_from_tarballs(downloaded,
regex=regex, path=path,
verbose=verbose)
if delete:
for tarball_name in downloaded:
log.info("Deleting {0}".format(tarball_name))
os.remove(tarball_name)
all_files += fitsfilelist
# download the other files
all_files += self.download_files(expanded_files, savedir=path)
except requests.ConnectionError as ex:
self.partial_file_list = all_files
log.error("There was an error downloading the file. "
"A partially completed download list is "
"in Alma.partial_file_list")
raise ex
except requests.HTTPError as ex:
if ex.response.status_code == 401:
log.info("Access denied to {url}. Skipping to"
" next file".format(url=url))
else:
raise ex
return all_files
def help(self, cache=True):
"""
Return the valid query parameters
"""
print("\nMost common ALMA query keywords are listed below. These "
"keywords are part of the ALMA ObsCore model, an IVOA standard "
"for metadata representation (3rd column). They were also "
"present in original ALMA Web form and, for backwards "
"compatibility can be accessed with their old names (2nd "
"column).\n"
"More elaborate queries on the ObsCore model "
"are possible with `query_sia` or `query_tap` methods")
print(" {0:33s} {1:35s} {2:35s}".format("Description",
"Original ALMA keyword",
"ObsCore keyword"))
print("-"*103)
for title, section in ALMA_FORM_KEYS.items():
print()
print(title)
for row in section.items():
print(" {0:33s} {1:35s} {2:35s}".format(row[0], row[1][0], row[1][1]))
print('\nExamples of queries:')
print("Alma.query('proposal_id':'2011.0.00131.S'}")
print("Alma.query({'band_list': ['5', '7']}")
print("Alma.query({'source_name_alma': 'GRB021004'})")
print("Alma.query(payload=dict(project_code='2017.1.01355.L', "
"source_name_alma='G008.67'))")
def _json_summary_to_table(self, data, base_url):
"""
        Special tool to convert some JSON metadata to a table. Obsolete as of
        March 2020 - should be removed along with stage_data_prefeb2020
"""
from ..utils import url_helpers
from six import iteritems
columns = {'mous_uid': [], 'URL': [], 'size': []}
for entry in data['node_data']:
# de_type can be useful (e.g., MOUS), but it is not necessarily
# specified
# file_name and file_key *must* be specified.
is_file = \
(entry['file_name'] != 'null' and entry['file_key'] != 'null')
if is_file:
# "de_name": "ALMA+uid://A001/X122/X35e",
columns['mous_uid'].append(entry['de_name'][5:])
if entry['file_size'] == 'null':
columns['size'].append(np.nan * u.Gbyte)
else:
columns['size'].append(
(int(entry['file_size']) * u.B).to(u.Gbyte))
# example template for constructing url:
# https://almascience.eso.org/dataPortal/requests/keflavich/940238268/ALMA/
# uid___A002_X9d6f4c_X154/2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar
# above is WRONG... except for ASDMs, when it's right
# should be:
# 2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar/2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar
#
# apparently ASDMs are different from others:
# templates:
# https://almascience.eso.org/dataPortal/requests/keflavich/946895898/ALMA/
# 2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar/2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar
# uid___A002_X9ee74a_X26f0/2013.1.00308.S_uid___A002_X9ee74a_X26f0.asdm.sdm.tar
url = url_helpers.join(base_url,
entry['file_key'],
entry['file_name'])
if 'null' in url:
raise ValueError("The URL {0} was created containing "
"'null', which is invalid.".format(url))
columns['URL'].append(url)
columns['size'] = u.Quantity(columns['size'], u.Gbyte)
tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])
return tbl
def get_project_metadata(self, projectid, cache=True):
"""
Get the metadata - specifically, the project abstract - for a given project ID.
"""
if len(projectid) != 14:
raise AttributeError('Wrong length for project ID')
if not projectid[4] == projectid[6] == projectid[12] == '.':
raise AttributeError('Wrong format for project ID')
result = self.query_tap(
"select distinct proposal_abstract from "
"ivoa.obscore where proposal_id='{}'".format(projectid))
if ASTROPY_LT_4_1:
return [result[0]['proposal_abstract'].astype(str)]
else:
return [result[0]['proposal_abstract']]
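    # Example with a project code of the documented 14-character form (the code
    # below also appears in the comments above and is purely illustrative):
    #
    #     Alma.get_project_metadata('2013.1.00308.S')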
Alma = AlmaClass()
def clean_uid(uid):
"""
Return a uid with all unacceptable characters replaced with underscores
"""
if not hasattr(uid, 'replace'):
return clean_uid(str(uid.astype('S')))
try:
return uid.decode('utf-8').replace(u"/", u"_").replace(u":", u"_")
except AttributeError:
return uid.replace("/", "_").replace(":", "_")
def reform_uid(uid):
"""
Convert a uid with underscores to the original format
"""
return uid[:3] + "://" + "/".join(uid[6:].split("_"))
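# Illustration of the round trip implemented by the two helpers above:
#   clean_uid('uid://A002/X391d0b/X7b')   -> 'uid___A002_X391d0b_X7b'
#   reform_uid('uid___A002_X391d0b_X7b')  -> 'uid://A002/X391d0b/X7b'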
def unique(seq):
"""
Return unique elements of a list, preserving order
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def filter_printable(s):
""" extract printable characters from a string """
return filter(lambda x: x in string.printable, s)
def uid_json_to_table(jdata,
productlist=['ASDM', 'PIPELINE_PRODUCT',
'PIPELINE_PRODUCT_TARFILE',
'PIPELINE_AUXILIARY_TARFILE']):
rows = []
def flatten_jdata(this_jdata, mousID=None):
if isinstance(this_jdata, list):
for item in this_jdata:
if item['type'] in productlist:
item['mous_uid'] = mousID
rows.append(item)
elif len(item['children']) > 0:
if len(item['allMousUids']) == 1:
flatten_jdata(item['children'], item['allMousUids'][0])
else:
flatten_jdata(item['children'])
flatten_jdata(jdata['children'])
keys = rows[-1].keys()
columns = [Column(data=[row[key] for row in rows], name=key)
for key in keys if key not in ('children', 'allMousUids')]
columns = [col.astype(str) if col.dtype.name == 'object' else col for col
in columns]
return Table(columns)
| bsd-3-clause |
EliotBryant/ShadDetector | shadDetector_testing/Gradient Based Methods/shadowmask_canny.py | 1 | 6919 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 12:09:11 2017
@author: Eliot
shadowmask_canny.py
"""
def trimcontours(contours, sizethreshold):
'''
function to reduce the list of contours and remove noise from contours by
thresholding out contours below a given size
'''
cont_output = []
for conts in contours:
area = cv2.contourArea(conts, False)
if area > sizethreshold:
cont_output.append(conts)
return cont_output
def trimcontoursbylength(contours, lengththreshold):
'''
function to reduce the list of contours and remove noise from contours by
thresholding out contours below a given length
'''
cont_output = []
for conts in contours:
length = cv2.arcLength(conts,False)
if length > lengththreshold:
cont_output.append(conts)
return cont_output
# 0. import necessary libraries
import os
import numpy as np
import cv2
import matplotlib.pylab as plt
from skimage import morphology, exposure, filters, img_as_ubyte, img_as_float
# 1. Read in input image
'''
Day2Run2 imageset
# change to medianblur directory
'''
thisfilepath = os.path.dirname(__file__)
loaddirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/Input Images/adaptive_medianblur_gauss"))
savedirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/shadowmaskcannies"))
histdirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/shadowmaskcannies/hists"))
truncateddirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/shadowmaskcannies/truncated"))
'''
Initial Run imageset
'''
#loaddirpath = os.path.abspath(os.path.join(thisfilepath, "../../data/Initial_Run/imgs_1_MEDBLUR"))
#savedirpath = os.path.abspath(os.path.join(thisfilepath, "../../data/Initial_Run/imgs_3_CANNYSHADMASK"))
for imgs in os.listdir(loaddirpath):
colour = cv2.imread(loaddirpath + "/" + imgs)
c_cop = colour.copy()
image = colour[...,0]
print(imgs)
canny = cv2.Canny(image, 2480, 2999, apertureSize=7, L2gradient=True)
# cv2.imshow("Canny", canny)
# if cv2.waitKey(0) & 0xff == 27:
# cv2.destroyAllWindows()
# eroded binary combined to find reflection regions
dilated = cv2.dilate(canny, np.ones((3,3), dtype="int"), iterations=1)
# cv2.imshow("dilated", dilated)
# if cv2.waitKey(0) & 0xff == 27:
# cv2.destroyAllWindows()
#create border for reflection regions
imH, imW = dilated.shape[0], dilated.shape[1]
thick = 2
cv2.rectangle(dilated,(-1+thick, -1+thick),(imW-thick, imH-thick),255,thickness=thick)
## First contour set for shadow regions
# (cimage3, contr3, heir3) = cv2.findContours(dilated.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
# heir3 = heir3[0] # get the actual inner heirarchy
# contr3 = trimcontoursbylength(contr3, 80)
#print(contr3)
# newbinary = np.zeros_like(cimage3, dtype=np.uint8) # this bit just for showing the contours
# cv2.drawContours(newbinary, contr3, -1, (255), thickness = 2)
# cv2.imshow("Contours", newbinary)
# if cv2.waitKey(0) & 0xff == 27:
# cv2.destroyAllWindows()
dilated2 = cv2.dilate(dilated, np.ones((3,3), dtype="int"), iterations=2)
cv2.imshow("Contours", dilated2)
if cv2.waitKey(0) & 0xff == 27:
cv2.destroyAllWindows()
# second contour set for thicker lines
(cimage4, contr4, heir4) = cv2.findContours(dilated2.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
heir4 = heir4[0] # get the actual inner heirarchy
contr4 = trimcontours(contr4, 300)
# create blank binary image for drawing on
binary3 = np.zeros_like(dilated2)
fordrawing = c_cop.copy()
# calculate Otsu threshold for thresholding regions as shadow or not
mean = np.mean(image)
std = np.std(image)
print("mean =", mean, "standard dev =", std)
histogrammask = np.logical_and(image < mean+2*std, image > mean-1.25*std)
ret, imagetruncated = cv2.threshold(image.copy(), mean+2*std, 255, cv2.THRESH_TRUNC) # truncating histogram
ret, imagetruncated = cv2.threshold(imagetruncated.copy(), mean-1.25*std, 255, cv2.THRESH_TOZERO) #truncating histogram
imagetruncated[imagetruncated==0] = mean-std
cv2.imwrite(truncateddirpath + "/" + imgs[:3] + "truncatedimage.png", imagetruncated)
truncated_float = img_as_float(imagetruncated)
threshold_global_otsu = filters.threshold_otsu(truncated_float[histogrammask])
threshold_global_otsu = int((threshold_global_otsu*255 - 6))
print("Threshold Global Otsu Value = ", threshold_global_otsu+2) #using global otsu
# histo = plt.hist(image[histogrammask].ravel(),256,[0,256])
# plt.title("Otsu Threshold = " + str(threshold_global_otsu))
# plt.savefig(histdirpath + "/" + imgs[:3] + "cannyhistogram.png")
# plt.close()
# draw contours of shadow regions using 3 criteria: hierarchy, area and meanvalue
for component in zip(contr4, heir4):
colour = np.random.randint(256, size=3)
print(colour)
coltuple = tuple(colour)
print(colour)
mask = np.zeros_like(dilated2)
currentContour = component[0]
currentHierarchy = component[1]
currentContourArea = cv2.contourArea(currentContour)
cv2.fillConvexPoly(mask, currentContour, 255)
#cv2.imshow("mask", mask)
#if cv2.waitKey(0) & 0xff == 27:
# cv2.destroyAllWindows
(mean_val,___, ___, ___) = cv2.mean(image,mask = mask)
print("Current Contour Area = ", currentContourArea)
print("Current Area Mean Value =", mean_val)
print("Current Heirarchy = ", currentHierarchy[3])
if currentContourArea < 550000:
if currentContourArea > 100:
if currentHierarchy[3] > -1:
if mean_val < threshold_global_otsu:
print(currentHierarchy)
cv2.drawContours(binary3, currentContour ,-1, (255), 3)
                        cv2.fillConvexPoly(binary3, currentContour, 255)
                        cv2.fillPoly(binary3, [currentContour], 255)
# for analysis purposes
cv2.drawContours(fordrawing, currentContour, -1, (92,163,180),1)
M = cv2.moments(currentContour)
cx, cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
cv2.putText(fordrawing, "Mean = " + '{:07.0f}'.format(mean_val),(cx-6,cy-3), cv2.FONT_HERSHEY_SIMPLEX, 0.25,coltuple,1,cv2.LINE_AA)
cv2.imshow("fordrawing", fordrawing)
if cv2.waitKey(0) & 0xff == 27:
cv2.destroyAllWindows
# cv2.imshow("Applied 3 Contour Criteria and Fill Regions", binary3)
# if cv2.waitKey(0) & 0xff == 27:
# cv2.destroyAllWindows()
# cv2.imwrite(savedirpath + "/" + imgs[:3] + "shadowmask.png", binary3)
cv2.imwrite(savedirpath + "/" + imgs[:3] + "cannyshadowmask.png", binary3) | gpl-3.0 |
ThomasSweijen/yadesolute2 | py/post2d.py | 4 | 13849 | # encoding: utf-8
# 2009 © Václav Šmilauer <[email protected]>
"""
Module for 2d postprocessing, containing classes to project points from 3d to 2d in various ways,
providing basic but flexible framework for extracting arbitrary scalar values from bodies/interactions
and plotting the results. There are 2 basic components: flatteners and extractors.
The algorithms operate on bodies (default) or interactions, depending on the ``intr`` parameter
of post2d.data.
Flatteners
==========
Instance of classes that convert 3d (model) coordinates to 2d (plot) coordinates. Their interface is
defined by the :yref:`yade.post2d.Flatten` class (``__call__``, ``planar``, ``normal``).
Extractors
==========
Callable objects returning scalar or vector value, given a body/interaction object.
If a 3d vector is returned, Flattener.planar is called, which should return only in-plane
components of the vector.
Example
=======
This example can be found in examples/concrete/uniax-post.py ::
from yade import post2d
import pylab # the matlab-like interface of matplotlib
O.load('/tmp/uniax-tension.xml.bz2')
# flattener that project to the xz plane
flattener=post2d.AxisFlatten(useRef=False,axis=1)
# return scalar given a Body instance
extractDmg=lambda b: b.state.normDmg
# will call flattener.planar implicitly
# the same as: extractVelocity=lambda b: flattener.planar(b,b.state.vel)
extractVelocity=lambda b: b.state.vel
# create new figure
pylab.figure()
# plot raw damage
post2d.plot(post2d.data(extractDmg,flattener))
# plot smooth damage into new figure
pylab.figure(); ax,map=post2d.plot(post2d.data(extractDmg,flattener,stDev=2e-3))
# show color scale
pylab.colorbar(map,orientation='horizontal')
# raw velocity (vector field) plot
pylab.figure(); post2d.plot(post2d.data(extractVelocity,flattener))
# smooth velocity plot; data are sampled at regular grid
pylab.figure(); ax,map=post2d.plot(post2d.data(extractVelocity,flattener,stDev=1e-3))
# save last (current) figure to file
pylab.gcf().savefig('/tmp/foo.png')
# show the figures
pylab.show()
"""
from yade.wrapper import *
try:
from minieigen import *
except ImportError:
from miniEigen import *
class Flatten:
"""Abstract class for converting 3d point into 2d. Used by post2d.data2d."""
def __init__(self): pass
def __call__(self,b):
"Given a :yref:`Body` / :yref:`Interaction` instance, should return either 2d coordinates as a 2-tuple, or None if the Body should be discarded."
pass
def planar(self,pos,vec):
"Given position and vector value, project the vector value to the flat plane and return its 2 in-plane components."
def normal(self,pos,vec):
"Given position and vector value, return lenght of the vector normal to the flat plane."
class HelixFlatten(Flatten):
"""Class converting 3d point to 2d based on projection from helix.
The y-axis in the projection corresponds to the rotation axis"""
def __init__(self,useRef,thetaRange,dH_dTheta,axis=2,periodStart=0):
"""
:param bool useRef: use reference positions rather than actual positions
:param (θmin,θmax) thetaRange: bodies outside this range will be discarded
:param float dH_dTheta: inclination of the spiral (per radian)
:param {0,1,2} axis: axis of rotation of the spiral
:param float periodStart: height of the spiral for zero angle
"""
self.useRef,self.thetaRange,self.dH_dTheta,self.axis,self.periodStart=useRef,thetaRange,dH_dTheta,axis,periodStart
self.ax1,self.ax2=(axis+1)%3,(axis+2)%3
def _getPos(self,b):
return b.state.refPos if self.useRef else b.state.pos
def __call__(self,b):
import yade.utils
		xy,theta=yade.utils.spiralProject(self._getPos(b),self.dH_dTheta,self.axis,self.periodStart)
		if theta<self.thetaRange[0] or theta>self.thetaRange[1]: return None
		return xy
	def planar(self,b,vec):
		pos=self._getPos(b)
		pos[self.axis]=0; pos.Normalize()
		return pos.Dot(vec),vec[self.axis]
	def normal(self,b,vec):
		pos=self._getPos(b)
		ax=Vector3(0,0,0); ax[self.axis]=1
		circum=ax.Cross(pos); circum.Normalize()
		return circum.Dot(vec)
class CylinderFlatten(Flatten):
"""Class for converting 3d point to 2d based on projection onto plane from circle.
The y-axis in the projection corresponds to the rotation axis; the x-axis is distance form the axis.
"""
def __init__(self,useRef,axis=2):
"""
:param useRef: (bool) use reference positions rather than actual positions
:param axis: axis of the cylinder, ∈{0,1,2}
"""
if axis not in (0,1,2): raise IndexError("axis must be one of 0,1,2 (not %d)"%axis)
self.useRef,self.axis=useRef,axis
def _getPos(self,b):
return b.state.refPos if self.useRef else b.state.pos
def __call__(self,b):
		p=self._getPos(b)
		pp=(p[(self.axis+1)%3],p[(self.axis+2)%3])
		from math import sqrt
		return sqrt(pp[0]**2+pp[1]**2),p[self.axis]
	def planar(self,b,vec):
		pos=self._getPos(b)
		pos[self.axis]=0; pos.Normalize()
		return pos.Dot(vec),vec[self.axis]
	def normal(self,b,vec):
		pos=self._getPos(b)
		ax=Vector3(0,0,0); ax[self.axis]=1
		circum=ax.Cross(pos); circum.Normalize()
		return circum.Dot(vec)
class AxisFlatten(Flatten):
def __init__(self,useRef=False,axis=2):
"""
:param bool useRef: use reference positions rather than actual positions (only meaningful when operating on Bodies)
:param {0,1,2} axis: axis normal to the plane; the return value will be simply position with this component dropped.
"""
if axis not in (0,1,2): raise IndexError("axis must be one of 0,1,2 (not %d)"%axis)
self.useRef,self.axis=useRef,axis
self.ax1,self.ax2=(self.axis+1)%3,(self.axis+2)%3
def __call__(self,b):
p=((b.state.refPos if self.useRef else b.state.pos) if isinstance(b,Body) else b.geom.contactPoint)
return (p[self.ax1],p[self.ax2])
def planar(self,pos,vec):
return vec[self.ax1],vec[self.ax2]
def normal(self,pos,vec):
return vec[self.axis]
def data(extractor,flattener,intr=False,onlyDynamic=True,stDev=None,relThreshold=3.,perArea=0,div=(50,50),margin=(0,0),radius=1):
"""Filter all bodies/interactions, project them to 2d and extract required scalar value;
return either discrete array of positions and values, or smoothed data, depending on whether the stDev
value is specified.
The ``intr`` parameter determines whether we operate on bodies or interactions;
the extractor provided should expect to receive body/interaction.
:param callable extractor: receives :yref:`Body` (or :yref:`Interaction`, if ``intr`` is ``True``) instance, should return scalar, a 2-tuple (vector fields) or None (to skip that body/interaction)
:param callable flattener: :yref:`yade.post2d.Flatten` instance, receiving body/interaction, returns its 2d coordinates or ``None`` (to skip that body/interaction)
:param bool intr: operate on interactions rather than bodies
:param bool onlyDynamic: skip all non-dynamic bodies
:param float/None stDev: standard deviation for averaging, enables smoothing; ``None`` (default) means raw mode, where discrete points are returned
:param float relThreshold: threshold for the gaussian weight function relative to stDev (smooth mode only)
:param int perArea: if 1, compute weightedSum/weightedArea rather than weighted average (weightedSum/sumWeights); the first is useful to compute average stress; if 2, compute averages on subdivision elements, not using weight function
:param (int,int) div: number of cells for the gaussian grid (smooth mode only)
:param (float,float) margin: x,y margins around bounding box for data (smooth mode only)
:param float/callable radius: Fallback value for radius (for raw plotting) for non-spherical bodies or interactions; if a callable, receives body/interaction and returns radius
	:return: dictionary
	The returned dictionary always contains the keys 'type' (one of 'rawScalar','rawVector','smoothScalar','smoothVector', depending on whether *stDev* is given and on the return value of the extractor), 'x', 'y', 'bbox'.
	Raw data additionally contain 'radii'.
	Scalar fields contain 'val' (value from *extractor*); vector fields have 'valX' and 'valY' (the 2 components returned by the *extractor*).
"""
from miniEigen import Vector3
xx,yy,dd1,dd2,rr=[],[],[],[],[]
nDim=0
objects=O.interactions if intr else O.bodies
for b in objects:
if not intr and onlyDynamic and not b.dynamic: continue
xy,d=flattener(b),extractor(b)
if xy==None or d==None: continue
if nDim==0: nDim=1 if isinstance(d,float) else 2
if nDim==1: dd1.append(d);
elif len(d)==2:
dd1.append(d[0]); dd2.append(d[1])
elif len(d)==3:
d1,d2=flattener.planar(b,Vector3(d))
dd1.append(d1); dd2.append(d2)
else:
raise RuntimeError("Extractor must return float or 2 or 3 (not %d) floats"%nDim)
if stDev==None: # radii are needed in the raw mode exclusively
if not intr and isinstance(b.shape,Sphere): r=b.shape.radius
else: r=(radius(b) if callable(radius) else radius)
rr.append(r)
xx.append(xy[0]); yy.append(xy[1]);
if stDev==None:
bbox=(min(xx),min(yy)),(max(xx),max(yy))
if nDim==1: return {'type':'rawScalar','x':xx,'y':yy,'val':dd1,'radii':rr,'bbox':bbox}
else: return {'type':'rawVector','x':xx,'y':yy,'valX':dd1,'valY':dd2,'radii':rr,'bbox':bbox}
from yade.WeightedAverage2d import GaussAverage
import numpy
lo,hi=(min(xx),min(yy)),(max(xx),max(yy))
llo=lo[0]-margin[0],lo[1]-margin[1]; hhi=hi[0]+margin[0],hi[1]+margin[1]
ga=GaussAverage(llo,hhi,div,stDev,relThreshold)
ga2=GaussAverage(llo,hhi,div,stDev,relThreshold)
for i in range(0,len(xx)):
ga.add(dd1[i],(xx[i],yy[i]))
if nDim>1: ga2.add(dd2[i],(xx[i],yy[i]))
step=[(hhi[i]-llo[i])/float(div[i]) for i in [0,1]]
xxx,yyy=[numpy.arange(llo[i]+.5*step[i],hhi[i],step[i]) for i in [0,1]]
ddd=numpy.zeros((len(yyy),len(xxx)),float)
ddd2=numpy.zeros((len(yyy),len(xxx)),float)
# set the type of average we are going to use
if perArea==0:
def compAvg(gauss,coord,cellCoord): return float(gauss.avg(coord))
elif perArea==1:
def compAvg(gauss,coord,cellCoord): return gauss.avgPerUnitArea(coord)
elif perArea==2:
def compAvg(gauss,coord,cellCoord):
s=gauss.cellSum(cellCoord);
return (s/gauss.cellArea) if s>0 else float('nan')
elif perArea==3:
def compAvg(gauss,coord,cellCoord):
s=gauss.cellSum(cellCoord);
return s if s>0 else float('nan')
else: raise RuntimeError('Invalid value of *perArea*, must be one of 0,1,2,3.')
#
for cx in range(0,div[0]):
for cy in range(0,div[1]):
ddd[cy,cx]=compAvg(ga,(xxx[cx],yyy[cy]),(cx,cy))
if nDim>1: ddd2[cy,cx]=compAvg(ga2,(xxx[cx],yyy[cy]),(cx,cy))
if nDim==1: return {'type':'smoothScalar','x':xxx,'y':yyy,'val':ddd,'bbox':(llo,hhi),'perArea':perArea,'grid':ga}
else: return {'type':'smoothVector','x':xxx,'y':yyy,'valX':ddd,'valY':ddd2,'bbox':(llo,hhi),'grid':ga,'grid2':ga2}
def plot(data,axes=None,alpha=.5,clabel=True,cbar=False,aspect='equal',**kw):
"""Given output from post2d.data, plot the scalar as discrete or smooth plot.
For raw discrete data, plot filled circles with radii of particles, colored by the scalar value.
For smooth discrete data, plot image with optional contours and contour labels.
For vector data (raw or smooth), plot quiver (vector field), with arrows colored by the magnitude.
:param axes: matplotlib.axes\ instance where the figure will be plotted; if None, will be created from scratch.
:param data: value returned by :yref:`yade.post2d.data`
:param bool clabel: show contour labels (smooth mode only), or annotate cells with numbers inside (with perArea==2)
:param bool cbar: show colorbar (equivalent to calling pylab.colorbar(mappable) on the returned mappable)
:return: tuple of ``(axes,mappable)``; mappable can be used in further calls to pylab.colorbar.
"""
import pylab,math
if not axes: axes=pylab.gca()
if data['type']=='rawScalar':
from matplotlib.patches import Circle
import matplotlib.collections,numpy
patches=[]
for x,y,d,r in zip(data['x'],data['y'],data['val'],data['radii']):
patches.append(Circle(xy=(x,y),radius=r))
coll=matplotlib.collections.PatchCollection(patches,linewidths=0.,**kw)
coll.set_array(numpy.array(data['val']))
bb=coll.get_datalim(coll.get_transform())
axes.add_collection(coll)
axes.set_xlim(bb.xmin,bb.xmax); axes.set_ylim(bb.ymin,bb.ymax)
if cbar: axes.get_figure().colorbar(coll)
axes.grid(True); axes.set_aspect(aspect)
return axes,coll
elif data['type']=='smoothScalar':
loHi=data['bbox']
if data['perArea'] in (0,1):
img=axes.imshow(data['val'],extent=(loHi[0][0],loHi[1][0],loHi[0][1],loHi[1][1]),origin='lower',aspect=aspect,**kw)
ct=axes.contour(data['x'],data['y'],data['val'],colors='k',origin='lower',extend='both')
if clabel: axes.clabel(ct,inline=1,fontsize=10)
else:
img=axes.imshow(data['val'],extent=(loHi[0][0],loHi[1][0],loHi[0][1],loHi[1][1]),origin='lower',aspect=aspect,interpolation='nearest',**kw)
xStep=(data['x'][1]-data['x'][0]) if len(data['x'])>1 else 0
for y,valLine in zip(data['y'],data['val']):
for x,val in zip(data['x'],valLine): axes.text(x-.4*xStep,y,('-' if math.isnan(val) else '%5g'%val),size=4)
axes.update_datalim(loHi)
axes.set_xlim(loHi[0][0],loHi[1][0]); axes.set_ylim(loHi[0][1],loHi[1][1])
if cbar: axes.get_figure().colorbar(img)
axes.grid(True if data['perArea'] in (0,1) else False); axes.set_aspect(aspect)
return axes,img
elif data['type'] in ('rawVector','smoothVector'):
import numpy
loHi=data['bbox']
valX,valY=numpy.array(data['valX']),numpy.array(data['valY']) # rawVector data are plain python lists
scalars=numpy.sqrt(valX**2+valY**2)
# numpy.sqrt computes element-wise sqrt
quiv=axes.quiver(data['x'],data['y'],data['valX'],data['valY'],scalars,**kw)
#axes.update_datalim(loHi)
axes.set_xlim(loHi[0][0],loHi[1][0]); axes.set_ylim(loHi[0][1],loHi[1][1])
if cbar: axes.get_figure().colorbar(coll)
axes.grid(True); axes.set_aspect(aspect)
return axes,quiv
| gpl-2.0 |