# Hidden Markov Models
#
# Author: Ron Weiss <[email protected]>
# and Shiqiao Du <[email protected]>
# API changes: Jaques Grobler <[email protected]>
# Modifications to create the HMMLearn module: Gael Varoquaux
"""
The :mod:`hmmlearn.hmm` module implements hidden Markov models.
"""
import string
import cPickle
import numpy as np
import multiprocessing as mp
from numpy.random import multivariate_normal, normal
from sklearn.utils import check_random_state
from sklearn.utils.extmath import logsumexp
from sklearn.base import BaseEstimator
from sklearn.mixture import (
GMM, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from sklearn import cluster
from scipy.stats import (poisson, expon)
from copy import deepcopy
from .utils.fixes import (log_multivariate_normal_density,
log_poisson_pmf, log_exponential_density)
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'PoissonHMM',
'ExponentialHMM',
'MultinomialExponentialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
def batches(l, n):
""" Yield successive n-sized batches from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
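# Quick illustration (sketch) of how observation lists are chunked for the
# batched E-step and scoring below:
#
#     list(batches(range(5), 2))   # -> [[0, 1], [2, 3], [4]]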
def unwrap_self_estep(arg, **kwarg):
return _BaseHMM._do_estep(*arg, **kwarg)
def unwrap_self_score(arg, **kwarg):
return _BaseHMM._score(*arg, **kwarg)
def merge_sum(x, y):
D = {}
for k in x.keys():
if isinstance(x[k], list):
z = []
for i in xrange(len(x[k])):
z.append(x[k][i] + y[k][i])
D[k] = z
else:
D[k] = x[k] + y[k]
return D
def reduce_merge_sum(L):
return reduce(lambda x, y: merge_sum(x, y), L)
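# Sketch of how per-batch sufficient statistics are combined (hypothetical
# dicts; real ones come from _initialize_sufficient_statistics):
#
#     a = {'nobs': 1, 'start': np.zeros(2)}
#     b = {'nobs': 2, 'start': np.ones(2)}
#     merge_sum(a, b)            # {'nobs': 3, 'start': array([1., 1.])}
#     reduce_merge_sum([a, b])   # same result, for a list of stats dicts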
def log_normalize(A, axis=None):
arr = np.rollaxis(A, axis)
vmax = arr.max(axis=axis)
return normalize(np.exp((arr.T - vmax).T))
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
WARNING: The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
WARNING: Modifies inplace the array
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
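# Example (sketch): row-normalizing a matrix of transition counts.
#
#     counts = np.array([[2., 2.], [1., 3.]])
#     probs = normalize(counts, axis=1)   # each row of probs sums to 1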
def randomize(A, axis=None):
randomizer = np.random.rand(*A.shape) / 10.
Arand = A + randomizer
return normalize(Arand, axis)
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose == 1``, output is printed once in a while (when the iteration
number is a multiple of ``verbose_mod``); if ``verbose`` is larger than 1,
output is printed for every update.
"""
def __init__(self, verbose):
self.verbose = verbose
self.verbose_fmt = '{iter:>10d} {lpr:>16.4f} {improvement:>16.4f}'
self.verbose_mod = 1
def init(self):
header_fields = ['Iter', 'Log Likelihood', 'Log Improvement']
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
def update(self, i, lpr, improvement):
"""Update reporter with new iteration. """
# print at most once every verbose_mod iterations
if (i + 1) % self.verbose_mod == 0:
print(self.verbose_fmt.format(iter=i + 1,
lpr=lpr,
improvement=improvement))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Attributes
----------
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_states`, `n_states`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape (`n_states`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emission parameters. Defaults to all
parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters, verbose=0,
n_jobs=1, batch_size=1, memory_safe=False):
self.n_states = n_states
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
if startprob_prior is None:
startprob_prior = np.ones(n_states)
self.startprob_prior = startprob_prior
self.transmat_ = transmat
if transmat_prior is None:
transmat_prior = np.ones((n_states,
n_states))
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
self.verbose = verbose
self.n_jobs = n_jobs
self.batch_size = batch_size
self.memory_safe = memory_safe
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : list of array_like, each of shape (n_i, n_features)
List of observation sequences. Each row of a sequence
corresponds to a single point in that sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : list of array_like, shape (n, n_states)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
logprob = 0
posteriors = []
for seq in obs:
seq = np.asarray(seq)
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors.append(np.exp(gamma.T - logsumexp(gamma, axis=1)).T)
posteriors[-1] += np.finfo(np.float32).eps
posteriors[-1] /= np.sum(posteriors[-1], axis=1).reshape((-1, 1))
logprob += lpr
return logprob, posteriors
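# Usage sketch (assumes a fitted subclass instance `model` and `sequences`,
# a list of observation arrays):
#
#     logprob, posteriors = model.score_samples(sequences)
#     # posteriors[i].shape == (len(sequences[i]), model.n_states)
#     # logprob is the log likelihood summed over all sequences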
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : list of array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
n_batches = (len(obs) // self.batch_size) + \
(1 if len(obs) % self.batch_size else 0)
if self.n_jobs == 1:
logprob = 0
for obs_batch in batches(obs, self.batch_size):
logprob += self._score(obs_batch)
else:
pool = mp.Pool(processes=self.n_jobs)
results = pool.map(unwrap_self_score,
zip([self] * n_batches,
batches(obs, self.batch_size)))
pool.terminate()
logprob = sum(results)
return logprob
def aic(self, obs):
"""Computes the Aikaike Information Criterion of the model and
set of observations.
Parameters
----------
obs : list of arrays
List of observation sequences.
Returns
-------
aic_score : float
The Akaike Information Criterion.
"""
logprob = self.score(obs)
n_pars = self._n_free_parameters()
aic_score = 2 * n_pars - 2 * logprob
return aic_score
def bic(self, obs):
"""Computes the Aikaike Information Criterion of the model and
set of observations.
Parameters
----------
obs : list of arrays
List of observation sequences.
Returns
-------
bic_score : float
The Bayesian Information Criterion.
"""
logprob = self.score(obs)
n_pars = self._n_free_parameters()
n_data = sum([len(seq) for seq in obs])
bic_score = n_pars * (np.log(n_data) - np.log(2 * np.pi)) - 2 * logprob
return bic_score
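# Model-selection sketch (hypothetical `sequences`; lower AIC/BIC is better):
#
#     candidates = [GaussianHMM(n_states=k, n_iter=20).fit(sequences)
#                   for k in (2, 3, 4)]
#     best = min(candidates, key=lambda m: m.bic(sequences))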
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : list of array_like, each of shape (n_i, n_features)
List of observation sequences. Each row of a sequence
corresponds to a single point in that sequence.
Returns
-------
viterbi_logprobs : array_like, shape (n,)
Log probability of the maximum likelihood path through the HMM.
state_sequences : list of array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
viterbi_logprobs = np.zeros(len(obs))
state_sequences = []
for n, seq in enumerate(obs):
seq = np.asarray(seq)
framelogprob = self._compute_log_likelihood(seq)
viterbi_logprobs[n], state_sequence = self._do_viterbi_pass(framelogprob)
state_sequences.append(state_sequence)
return viterbi_logprobs, state_sequences
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : list of array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
map_logprobs : array_like, shape (n,)
Log probability of the maximum likelihood path through the HMM
state_sequences : list of array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
map_logprobs = np.zeros(len(obs))
state_sequences = []
_, posteriors = self.score_samples(obs)
for n, post in enumerate(posteriors):
state_sequences.append(np.argmax(post, axis=1))
map_logprobs[n] = np.max(post, axis=1).sum()
return map_logprobs, state_sequences
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : list of array_like, each of shape (n_i, n_features)
List of observation sequences. Each row of a sequence
corresponds to a single point in that sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprobs : array_like, shape (n,)
Log probability of the maximum likelihood path through the HMM
state_sequences : list of array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprobs, state_sequences = decoder[algorithm](obs)
return logprobs, state_sequences
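# Decoding sketch (assumes a fitted model `model`). Note that the algorithm
# set on the estimator takes precedence over the keyword argument:
#
#     logprobs, paths = model.decode(sequences)   # Viterbi by default
#     model.algorithm = "map"
#     logprobs, paths = model.decode(sequences)   # posterior (MAP) decoding
#     # paths[i] is an integer array of length len(sequences[i])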
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : list of array_like, each of shape (n_i, n_features)
List of observation sequences. Each row of a sequence
corresponds to a single point in that sequence.
Returns
-------
state_sequences : list of array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequences = self.decode(obs, algorithm)
return state_sequences
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : list of array_like, each of shape (n_i, n_features)
List of observation sequences. Each row of a sequence
corresponds to a single point in that sequence.
Returns
-------
posteriors : list of array-like, shape (n, n_states)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n_seq=1, n_min=10, n_max=20, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_seq : int
Number of observation sequences to generate.
n_min : int
Minimum number of observations for a sequence.
n_max : int
Maximum number of observations for a sequence.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : list of array_like, length `n_seq`
List of sampled observation sequences.
states : list of array_like, length `n_seq`
List of corresponding hidden state sequences.
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
obs = []
states = []
for _ in range(n_seq):
n = random_state.randint(n_min, n_max)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
state_seq = [currstate]
obs_seq = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
state_seq.append(currstate)
obs_seq.append(self._generate_sample_from_state(
currstate, random_state=random_state))
obs.append(deepcopy(np.array(obs_seq)))
states.append(deepcopy(np.array(state_seq, dtype=int)))
return obs, states
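# Sampling sketch (assumes `model` already has startprob_, transmat_ and
# emission parameters set, e.g. after fitting):
#
#     obs, states = model.sample(n_seq=3, n_min=5, n_max=10, random_state=0)
#     # obs[i] and states[i] are aligned: states[i][t] generated obs[i][t]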
def fit(self, obs, warm_start=False):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data,
or strengthening the appropriate subclass-specific regularization
parameter.
"""
if self.memory_safe and (not isinstance(obs[0], str)):
raise ValueError("When memory_safe=True, observations must be "
"provided as a list of filepaths to pickled "
"sequence lists.")
n_batches = (len(obs) // self.batch_size) + \
(1 if len(obs) % self.batch_size else 0)
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
if not warm_start:
self._init(obs, self.init_params)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init()
logprob = []
for i in range(self.n_iter):
# Expectation step
if self.n_jobs == 1:
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for obs_batch in batches(obs, self.batch_size):
seq_stats, lpr = self._do_estep(obs_batch)
stats = merge_sum(stats, seq_stats)
curr_logprob += lpr
else:
pool = mp.Pool(processes=self.n_jobs)
results = pool.map(unwrap_self_estep,
zip([self] * n_batches,
batches(obs, self.batch_size)))
pool.terminate()
stats = reduce_merge_sum([x[0] for x in results])
curr_logprob = sum([x[1] for x in results])
logprob.append(curr_logprob)
if i > 0:
improvement = logprob[-1] - logprob[-2]
else:
improvement = np.inf
if self.verbose:
verbose_reporter.update(i, curr_logprob, improvement)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
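# Training sketch (illustrative; `sequences` is assumed to be a list of
# observation arrays, and the pickle filenames below are hypothetical):
#
#     model = GaussianHMM(n_states=3, n_iter=50, thresh=1e-3, verbose=1)
#     model.fit(sequences)
#     # memory-safe variant: each file holds a pickled list of sequences
#     model = GaussianHMM(n_states=3, memory_safe=True, batch_size=2)
#     model.fit(['chunk_0.pkl', 'chunk_1.pkl', 'chunk_2.pkl'])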
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_states, self.n_states)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_states:
raise ValueError('startprob must have length n_states')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_states,
(self.n_states, self.n_states))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_states, self.n_states)):
raise ValueError('transmat must have shape '
'(n_states, n_states)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_states = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_states, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_states = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_states))
_hmmc._forward(n_observations, n_states, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_states = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_states))
_hmmc._backward(n_observations, n_states, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_ = np.random.dirichlet(self.startprob_prior)
if 't' in params:
self.transmat_ = np.vstack([np.random.dirichlet(
self.transmat_prior[i])
for i in xrange(self.n_states)])
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_states),
'trans': np.zeros((self.n_states, self.n_states))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_states = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations > 1:
lneta = np.zeros((n_observations - 1,
n_states,
n_states))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_states, fwdlattice,
self._log_transmat, bwdlattice,
framelogprob, lnP, lneta)
stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700))
def _do_estep(self, obs_batch):
if self.memory_safe:
local_obs = reduce(lambda x, y: x + y,
[cPickle.load(open(filename, 'r'))
for filename in obs_batch],
[])
else:
local_obs = obs_batch
local_stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in local_obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
curr_logprob += lpr
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
self._accumulate_sufficient_statistics(local_stats, seq, framelogprob,
posteriors, fwdlattice,
bwdlattice, self.params)
if self.memory_safe:
local_obs = None
return local_stats, curr_logprob
def _score(self, obs_batch):
if self.memory_safe:
local_obs = reduce(lambda x, y: x + y,
[cPickle.load(open(filename, 'r'))
for filename in obs_batch],
[])
else:
local_obs = obs_batch
logprob = 0
for seq in local_obs:
seq = np.asarray(seq)
framelogprob = self._compute_log_likelihood(seq)
lpr, _ = self._do_forward_pass(framelogprob)
logprob += lpr
return logprob
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if 's' in params:
self.startprob_ = normalize(np.maximum(stats['start'], 1e-20))
if 't' in params:
self.transmat_ = normalize(np.maximum(stats['trans'], 1e-20), 1)
def _n_free_parameters(self):
pass
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
Parameters
----------
n_states : int
Number of states.
covariance_type : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
means : array, shape (`n_states`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_states`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_states`, `n_features`) if 'diag',
(`n_states`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars. Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import GaussianHMM
>>> GaussianHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_states=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_var=1.0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters,
verbose=0,
n_jobs=1,
batch_size=1,
memory_safe=False):
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params, verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
self._covariance_type = covariance_type
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_var = means_var
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if (hasattr(self, 'n_features')
and means.shape != (self.n_states, self.n_features)):
raise ValueError('means must have shape '
'(n_states, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_states
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_states)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if self.memory_safe:
concat_obs = np.vstack(cPickle.load(open(obs[0], 'r')))
else:
concat_obs = np.vstack(obs)
if (hasattr(self, 'n_features')
and self.n_features != concat_obs.shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (concat_obs.shape[1],
self.n_features))
self.n_features = concat_obs.shape[1]
if 'm' in params:
clu = cluster.KMeans(n_clusters=self.n_states).fit(
concat_obs)
self._means_ = np.array([multivariate_normal(
mean,
np.eye(self.n_features) * self.means_var)
for mean in clu.cluster_centers_])
if 'c' in params:
cv = np.cov(concat_obs.T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_states)
self._covars_[self._covars_ == 0] = 1e-5
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_states)
stats['obs'] = np.zeros((self.n_states, self.n_features))
stats['obs**2'] = np.zeros((self.n_states, self.n_features))
if self._covariance_type in ('tied', 'full'):
stats['obs*obs.T'] = np.zeros((self.n_states, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in range(self.n_states):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
def _do_mstep(self, stats, params):
super(GaussianHMM, self)._do_mstep(stats, params)
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
denom = stats['post'][:, np.newaxis]
if 'm' in params:
self._means_ = stats['obs'] / stats['post'][:, np.newaxis]
if 'c' in params:
covars_prior = self.covars_prior
covars_weight = self.covars_weight
if covars_prior is None:
covars_weight = 0
covars_prior = 0
if self._covariance_type in ('spherical', 'diag'):
cv_num = ((self._means_) ** 2
+ stats['obs**2']
- 2 * self._means_ * stats['obs']
+ self._means_ ** 2 * denom)
cv_den = max(covars_weight - 1, 0) + denom
self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den,
1e-5)
if self._covariance_type == 'spherical':
self._covars_ = np.tile(
self._covars_.mean(1)[:, np.newaxis],
(1, self._covars_.shape[1]))
elif self._covariance_type in ('tied', 'full'):
cvnum = np.empty((self.n_states, self.n_features,
self.n_features))
for c in range(self.n_states):
obsmean = np.outer(stats['obs'][c], self._means_[c])
cvnum[c] = (np.outer(self._means_[c],
self._means_[c])
+ stats['obs*obs.T'][c]
- obsmean - obsmean.T
+ np.outer(self._means_[c], self._means_[c])
* stats['post'][c])
cvweight = max(covars_weight - self.n_features, 0)
if self._covariance_type == 'tied':
self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
(cvweight + stats['post'].sum()))
elif self._covariance_type == 'full':
self._covars_ = ((covars_prior + cvnum) /
(cvweight + stats['post'][:, None, None]))
def _n_free_parameters(self):
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states * self.n_features
if self._covariance_type == 'spherical':
n_pars += self.n_states
elif self._covariance_type == 'tied':
n_pars += ((self.n_features + 1) * self.n_features) / 2
elif self._covariance_type == 'diag':
n_pars += self.n_states * self.n_features
elif self._covariance_type == 'full':
n_pars += self.n_states * ((self.n_features + 1) * self.n_features) / 2
return n_pars
def fit(self, obs, warm_start=False):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. the covariance parameter on one or
more components becoming too small). You can fix this by getting
more training data, or increasing covars_prior.
"""
return super(GaussianHMM, self).fit(obs, warm_start)
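# End-to-end sketch for GaussianHMM on synthetic data (illustrative only):
#
#     import numpy as np
#     from hmmlearn.hmm import GaussianHMM
#     rng = np.random.RandomState(0)
#     sequences = [rng.randn(100, 2) + 5 * rng.randint(0, 2, (100, 1))
#                  for _ in range(10)]
#     model = GaussianHMM(n_states=2, covariance_type='diag', n_iter=25)
#     model.fit(sequences)
#     hidden = model.predict(sequences)   # list of state index arrays
#     print(model.means_)                 # one mean vector per state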
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
Attributes
----------
n_states : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
emissionprob : array, shape (`n_states`, `n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
emissionprob_prior=None, algorithm="viterbi",
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters, init_params=string.ascii_letters,
verbose=0, n_jobs=1, batch_size=1, memory_safe=False):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
self.emissionprob_prior = emissionprob_prior
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_states, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_states, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
if self.memory_safe:
symbols = symbols.union(set(np.concatenate(
cPickle.load(open(o, 'r')))))
else:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
if self.emissionprob_prior is None:
self.emissionprob_prior = np.ones((self.n_states,
self.n_symbols))
emissionprob = np.vstack([np.random.dirichlet(
self.emissionprob_prior[i])
for i in xrange(self.n_states)])
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_states, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
"""
if self.memory_safe:
symbols = []
for o in obs:
symbols += cPickle.load(open(o, 'r'))
symbols = np.concatenate(symbols)
else:
symbols = np.concatenate(obs)
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative integer
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
# input symbols are not contiguous
return False
return True
def _n_free_parameters(self):
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states * (self.n_symbols - 1)
return n_pars
def fit(self, obs, warm_start=False, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
"""
err_msg = ("Input must be a list of non-negative integer arrays where "
"in all, every element must be continuous, but %s was "
"given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return super(MultinomialHMM, self).fit(obs, warm_start, **kwargs)
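# Sketch for MultinomialHMM: observations are 1-D integer arrays whose
# symbols, taken together, cover a contiguous range starting at 0:
#
#     import numpy as np
#     from hmmlearn.hmm import MultinomialHMM
#     sequences = [np.array([0, 1, 1, 2, 0, 2]), np.array([2, 2, 1, 0])]
#     model = MultinomialHMM(n_states=2, n_iter=20).fit(sequences)
#     print(model.emissionprob_)   # shape (n_states, n_symbols)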
class PoissonHMM(_BaseHMM):
"""Hidden Markov Model with Poisson (discrete) emissions
Attributes
----------
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
rates : array, shape (`n_states`,)
Poisson rate parameters for each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'r' for rates.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'r' for rates.
Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import PoissonHMM
>>> PoissonHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
PoissonHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
rates_var=1.0, algorithm="viterbi",
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters, verbose=0,
n_jobs=1, batch_size=1, memory_safe=False):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
self.rates_var = rates_var
def _get_rates(self):
"""Emission rate for each state."""
return self._rates
def _set_rates(self, rates):
rates = np.asarray(rates)
self._rates = rates.copy()
rates_ = property(_get_rates, _set_rates)
def _compute_log_likelihood(self, obs):
return log_poisson_pmf(obs, self._rates)
def _generate_sample_from_state(self, state, random_state=None):
return poisson.rvs(self._rates[state])
def _init(self, obs, params='str'):
super(PoissonHMM, self)._init(obs, params=params)
if self.memory_safe:
concat_obs = np.concatenate(cPickle.load(open(obs[0], 'r')))
else:
concat_obs = np.concatenate(obs)
if 'r' in params:
clu = cluster.KMeans(n_clusters=self.n_states).fit(
np.atleast_2d(concat_obs).T)
rates = normal(0, self.rates_var, self.n_states) + \
clu.cluster_centers_.T[0]
self._rates = np.maximum(0.1, rates)
def _initialize_sufficient_statistics(self):
stats = super(PoissonHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_states)
stats['obs'] = np.zeros((self.n_states,))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(PoissonHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'r' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
def _do_mstep(self, stats, params):
super(PoissonHMM, self)._do_mstep(stats, params)
if 'r' in params:
self._rates = stats['obs'] / stats['post']
def _check_input_symbols(self, obs):
"""check if input can be used for PoissonHMM. Input must be a list
of non-negative integers.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, -1, 3, 5, 10] not
"""
if self.memory_safe:
for o in obs:
symbols = np.concatenate(cPickle.load(open(o, 'r')))
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative integer
return False
else:
symbols = np.concatenate(obs)
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative integer
return False
return True
def _n_free_parameters(self):
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states
return n_pars
def fit(self, obs, warm_start=False):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. A decreasing `logprob` is generally
a sign of overfitting. You can fix this by getting more training
data or reducing the number of states.
"""
err_msg = ("Input must be a list of non-negative integer arrays, \
but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return super(PoissonHMM, self).fit(obs, warm_start)
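# Sketch for PoissonHMM: observations are 1-D arrays of non-negative counts:
#
#     import numpy as np
#     from hmmlearn.hmm import PoissonHMM
#     counts = [np.random.poisson(3, 200), np.random.poisson(12, 200)]
#     model = PoissonHMM(n_states=2, n_iter=20).fit(counts)
#     print(model.rates_)   # one Poisson rate per state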
class ExponentialHMM(_BaseHMM):
"""Hidden Markov Model with Exponential (continuous) emissions
Attributes
----------
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
rates : array, shape (`n_states`,)
Exponential rate parameters for each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'r' for rates.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'r' for rates.
Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import ExponentialHMM
>>> ExponentialHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ExponentialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
rates_var=1.0, algorithm="viterbi",
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters, verbose=0,
n_jobs=1, batch_size=1, memory_safe=False):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
self.rates_var = rates_var
def _get_rates(self):
"""Emission rate for each state."""
return self._rates
def _set_rates(self, rates):
rates = np.asarray(rates)
self._rates = rates.copy()
rates_ = property(_get_rates, _set_rates)
def _compute_log_likelihood(self, obs):
return log_exponential_density(obs, self._rates)
def _generate_sample_from_state(self, state, random_state=None):
return expon.rvs(scale=1. / self._rates[state])
def _init(self, obs, params='str'):
super(ExponentialHMM, self)._init(obs, params=params)
if self.memory_safe:
concat_obs = np.concatenate(cPickle.load(open(obs[0], 'r')))
else:
concat_obs = np.concatenate(obs)
if 'r' in params:
clu = cluster.KMeans(n_clusters=self.n_states).fit(
np.atleast_2d(concat_obs).T)
rates = normal(0, self.rates_var, self.n_states) + \
1. / clu.cluster_centers_.T[0]
self._rates = np.maximum(0.1, rates)
def _initialize_sufficient_statistics(self):
stats = super(ExponentialHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_states)
stats['obs'] = np.zeros((self.n_states,))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(ExponentialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'r' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
def _do_mstep(self, stats, params):
super(ExponentialHMM, self)._do_mstep(stats, params)
if 'r' in params:
self._rates = stats['post'] / stats['obs']
def _check_input_symbols(self, obs):
"""check if input can be used for ExponentialHMM. Input must be a list
of non-negative reals.
e.g. x = [0., 0.5, 2.3] is OK and y = [0.0, -1.0, 3.3, 5.4, 10.9] not
"""
if self.memory_safe:
for o in obs:
symbols = np.concatenate(cPickle.load(open(o, 'r')))
if symbols.dtype.kind not in ('f', 'i'):
# input values must be numeric
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative value
return False
else:
symbols = np.concatenate(obs)
if symbols.dtype.kind not in ('f', 'i'):
# input values must be numeric
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative value
return False
return True
def _n_free_parameters(self):
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states
return n_pars
def fit(self, obs, warm_start=False):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. A decreasing `logprob` is generally
a sign of overfitting. You can fix this by getting more training
data or reducing the number of states.
"""
err_msg = ("Input must be a list of non-negative real arrays, \
but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return super(ExponentialHMM, self).fit(obs, warm_start)
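# Sketch for ExponentialHMM: observations are 1-D arrays of non-negative reals:
#
#     import numpy as np
#     from hmmlearn.hmm import ExponentialHMM
#     durations = [np.random.exponential(0.5, 300),
#                  np.random.exponential(2.0, 300)]
#     model = ExponentialHMM(n_states=2, n_iter=20).fit(durations)
#     print(model.rates_)   # one exponential rate per state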
class MultinomialExponentialHMM(_BaseHMM):
"""Hidden Markov Model with joint multinomial and exponential emissions
Attributes
----------
n_states : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
emissionprob : array, shape (`n_states`, `n_symbols`)
Probability of emitting a given symbol when in each state.
rates : array, shape (`n_states`,)
Exponential rate parameters for each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'e' for emissionprob, 'r' for rates.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'e' for emissionprob, 'r' for rates.
Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import MultinomialExponentialHMM
>>> MultinomialExponentialHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialExponentialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
emissionprob_prior=None, rates_var=1.0, algorithm="viterbi",
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters, init_params=string.ascii_letters,
verbose=0, n_jobs=1, batch_size=1, memory_safe=False):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
self.emissionprob_prior = emissionprob_prior
self.rates_var = rates_var
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_states, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_states, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _get_rates(self):
"""Emission rate for each state."""
return self._rates
def _set_rates(self, rates):
rates = np.asarray(rates)
self._rates = rates.copy()
rates_ = property(_get_rates, _set_rates)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, map(int, obs[:, 0])].T + \
log_exponential_density(obs[:, 1], self._rates)
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
expon_obs = expon.rvs(scale=1. / self._rates[state])
return symbol, expon_obs
def _init(self, obs, params='ster'):
super(MultinomialExponentialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
if self.memory_safe:
symbols = symbols.union(set(np.concatenate(
cPickle.load(open(o, 'r')))[:, 0]))
else:
symbols = symbols.union(set(o[:, 0]))
self.n_symbols = len(symbols)
if self.emissionprob_prior is None:
self.emissionprob_prior = np.ones((self.n_states,
self.n_symbols))
emissionprob = np.vstack([np.random.dirichlet(
self.emissionprob_prior[i])
for i in xrange(self.n_states)])
self.emissionprob_ = emissionprob
if self.memory_safe:
concat_obs = np.concatenate(cPickle.load(open(obs[0], 'r')))[:, 1]
else:
concat_obs = np.concatenate(obs)[:, 1]
if 'r' in params:
clu = cluster.KMeans(n_clusters=self.n_states).fit(
np.atleast_2d(concat_obs).T)
rates = normal(0, self.rates_var, self.n_states) + \
1. / clu.cluster_centers_.T[0]
self._rates = np.maximum(0.1, rates)
def _initialize_sufficient_statistics(self):
stats = super(MultinomialExponentialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_states, self.n_symbols))
stats['post'] = np.zeros(self.n_states)
stats['expon_obs'] = np.zeros((self.n_states,))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialExponentialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs[:, 0]):
stats['obs'][:, int(symbol)] += posteriors[t]
if 'r' in params:
stats['post'] += posteriors.sum(axis=0)
stats['expon_obs'] += np.dot(posteriors.T, obs[:, 1])
def _do_mstep(self, stats, params):
super(MultinomialExponentialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
if 'r' in params:
self._rates = stats['post'] / stats['expon_obs']
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
"""
if self.memory_safe:
symbols = []
for o in obs:
symbols += cPickle.load(open(o, 'r'))
symbols = np.concatenate(symbols)[:, 0]
else:
symbols = np.concatenate(obs)[:, 0]
        if symbols.dtype.kind not in ('i', 'f'):
            # symbol column must be numeric (integer-valued)
            return False
        if len(symbols) == 1:
            # input too short
            return False
        if np.any(symbols < 0):
            # input contains negative integers
            return False
        symbols.sort()
        if np.any(np.diff(symbols) > 1):
            # input is discontinuous
            return False
if self.memory_safe:
for o in obs:
symbols = np.concatenate(cPickle.load(open(o, 'r')))[:, 1]
                if symbols.dtype.kind not in ('f', 'i'):
                    # duration column must be numeric
                    return False
                if len(symbols) == 1:
                    # input too short
                    return False
                if np.any(symbols < 0):
                    # input contains negative values
                    return False
else:
symbols = np.concatenate(obs)[:, 1]
            if symbols.dtype.kind not in ('f', 'i'):
                # duration column must be numeric
                return False
            if len(symbols) == 1:
                # input too short
                return False
            if np.any(symbols < 0):
                # input contains negative values
                return False
return True
def _n_free_parameters(self):
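        # Free parameters: (n_states - 1) start probabilities plus
        # n_states * (n_states - 1) transition probabilities, written below
        # as (n_states - 1) * (n_states + 1); plus (n_symbols - 1) emission
        # probabilities per state; plus one exponential rate per state.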
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states * (self.n_symbols - 1)
n_pars += self.n_states
return n_pars
def fit(self, obs, warm_start=False, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
"""
err_msg = ("Input must be a list of non-negative integer arrays where "
"in all, every element must be continuous, but %s was "
"given.")
cleaned_obs = [np.array(seq) for seq in obs]
if not self._check_input_symbols(cleaned_obs):
raise ValueError(err_msg % obs)
return super(MultinomialExponentialHMM, self).fit(cleaned_obs, warm_start, **kwargs)
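# A minimal usage sketch for MultinomialExponentialHMM, kept as comments. The
# data, the two-state setup and the n_states keyword are illustrative
# assumptions (the constructor is assumed to mirror the other HMM classes in
# this module); each observation row pairs an integer symbol with a
# non-negative duration, matching the checks in _check_input_symbols above:
#
#   durations = np.random.exponential(scale=2.0, size=50)
#   symbols = np.random.randint(0, 3, size=50)
#   obs = [np.column_stack([symbols, durations])]
#   model = MultinomialExponentialHMM(n_states=2)
#   model.fit(obs)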
class GMMHMM(_BaseHMM):
"""Hidden Markov Model with Gaussin mixture emissions
Attributes
----------
init_params : string, optional
Controls which parameters are initialized prior to training. Can
contain any combination of 's' for startprob, 't' for transmat, 'm'
for means, 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
contain any combination of 's' for startprob, 't' for transmat, 'm' for
means, and 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_states`,)
Initial state occupation distribution.
gmms : array of GMM objects, length `n_states`
GMM emission distributions for each state.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
    means_var : float, default: 1.0
        Variance used to randomize the initialization of the GMM means.
        The larger means_var, the greater the randomization.
Examples
--------
>>> from hmmlearn.hmm import GMMHMM
>>> GMMHMM(n_states=2, n_mix=10, covariance_type='diag')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
GMMHMM(algorithm='viterbi', covariance_type='diag',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, n_mix=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", gmms=None, covariance_type='diag',
covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters,
verbose=0, means_var=1.0,
n_jobs=1, batch_size=1, memory_safe=False):
"""Create a hidden Markov model with GMM emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
        # XXX: Hotfix for n_mix that is incompatible with the scikit's
# BaseEstimator API
self.n_mix = n_mix
self._covariance_type = covariance_type
self.covars_prior = covars_prior
self.gmms = gmms
if gmms is None:
gmms = []
for x in range(self.n_states):
if covariance_type is None:
g = GMM(n_mix)
else:
g = GMM(n_mix, covariance_type=covariance_type)
gmms.append(g)
self.gmms_ = gmms
self.means_var = means_var
# Read-only properties.
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _compute_log_likelihood(self, obs):
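        # Per-state log-likelihood: score every observation under each
        # state's Gaussian mixture.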
return np.array([g.score(obs) for g in self.gmms_]).T
def _generate_sample_from_state(self, state, random_state=None):
return self.gmms_[state].sample(1, random_state=random_state).flatten()
def _init(self, obs, params='stwmc'):
super(GMMHMM, self)._init(obs, params=params)
if self.memory_safe:
concat_obs = np.concatenate(cPickle.load(open(obs[0], 'r')), 0)
else:
concat_obs = np.concatenate(obs, 0)
n_features = concat_obs.shape[1]
for g in self.gmms_:
g.set_params(init_params=params, n_iter=0)
g.fit(concat_obs)
means = np.array([multivariate_normal(
mean,
np.eye(n_features) * self.means_var)
for mean in g.means_])
g.means_ = means
def _initialize_sufficient_statistics(self):
stats = super(GMMHMM, self)._initialize_sufficient_statistics()
stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GMMHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
for state, g in enumerate(self.gmms_):
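            # Combine the HMM posterior for this state with the within-state
            # GMM component responsibilities in log space, then run a single
            # weighted GMM M-step to accumulate sufficient statistics.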
_, tmp_gmm_posteriors = g.score_samples(obs)
lgmm_posteriors = np.log(tmp_gmm_posteriors
+ np.finfo(np.float).eps) + \
np.log(posteriors[:, state][:, np.newaxis]
+ np.finfo(np.float).eps)
gmm_posteriors = np.exp(lgmm_posteriors)
tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
n_features = g.means_.shape[1]
tmp_gmm._set_covars(
distribute_covar_matrix_to_match_covariance_type(
np.eye(n_features), g.covariance_type,
g.n_components))
norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
if np.any(np.isnan(tmp_gmm.covars_)):
raise ValueError
stats['norm'][state] += norm
if 'm' in params:
stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
if 'c' in params:
if tmp_gmm.covariance_type == 'tied':
stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
else:
cvnorm = np.copy(norm)
shape = np.ones(tmp_gmm.covars_.ndim)
shape[0] = np.shape(tmp_gmm.covars_)[0]
cvnorm.shape = shape
stats['covars'][state] += tmp_gmm.covars_ * cvnorm
def _do_mstep(self, stats, params):
super(GMMHMM, self)._do_mstep(stats, params)
# All that is left to do is to apply covars_prior to the
# parameters updated in _accumulate_sufficient_statistics.
for state, g in enumerate(self.gmms_):
n_features = g.means_.shape[1]
norm = stats['norm'][state]
if 'w' in params:
g.weights_ = normalize(norm)
if 'm' in params:
g.means_ = stats['means'][state] / norm[:, np.newaxis]
if 'c' in params:
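                # covars_prior acts as a simple regularizer: it is added to
                # the accumulated covariances before normalization so that
                # components with little posterior weight stay well-behaved.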
if g.covariance_type == 'tied':
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * np.eye(n_features))
/ norm.sum())
else:
cvnorm = np.copy(norm)
shape = np.ones(g.covars_.ndim)
shape[0] = np.shape(g.covars_)[0]
cvnorm.shape = shape
if (g.covariance_type in ['spherical', 'diag']):
g.covars_ = (stats['covars'][state] +
self.covars_prior) / cvnorm
elif g.covariance_type == 'full':
eye = np.eye(n_features)
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * eye[np.newaxis])
/ cvnorm)
def _n_free_parameters(self):
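        # Start and transition probabilities, plus per-state GMM parameters:
        # mixture weights, component means, and covariance entries whose
        # count depends on the covariance type.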
n_pars = (self.n_states - 1) * (self.n_states + 1)
for g in self.gmms_:
n_components = g.means_.shape[0]
n_features = g.means_.shape[1]
n_pars += n_components - 1
n_pars += n_components * n_features
if g.covariance_type == 'spherical':
n_pars += n_components
elif g.covariance_type == 'tied':
n_pars += ((n_features + 1) * n_features) / 2
elif g.covariance_type == 'diag':
n_pars += n_components * n_features
elif g.covariance_type == 'full':
n_pars += n_components * ((n_features + 1) * n_features) / 2
return n_pars
| bsd-3-clause |
juanka1331/VAN-applied-to-Nifti-images | final_scripts/results_reader/reader_helper.py | 1 | 1817 | from matplotlib import pyplot as plt
import os
import numpy as np
from matplotlib import cm
from numpy import linspace
mapa_etiquetas = {
"kernel": "Kernel",
"latent_layer": "Neuronas de la capa latente"
}
string_ref = "{0}. Variación {1}. Método {2}"
def generate_color_palette():
    # Sample nine evenly spaced colours from the jet colormap and return them.
    start = 0.0
    stop = 1.0
    cm_subsection = linspace(start, stop, 9)
    colors = [cm.jet(x) for x in cm_subsection]
    return colors
def plot_evaluation_parameters(list_parameters_dict, string_ref,
path_evaluation_images_folder,
swap_type, xlabel = "none"):
list_rows = list_parameters_dict
kernel_size_array = np.array([float(row[swap_type]) for row in list_rows])
f1_score_array = np.array([float(row['f1_score']) for row in list_rows])
recall_score_array = np.array([float(row['recall_score']) for row in list_rows])
accuracy_score_array = np.array([float(row['accuracy']) for row in list_rows])
auc_score_array = np.array([float(row['area under the curve']) for row in list_rows])
precision = np.array([float(row['precision']) for row in list_rows])
plt.figure()
plt.title(string_ref)
plt.plot(kernel_size_array, f1_score_array, label="Valor-F1")
plt.plot(kernel_size_array, recall_score_array, label="Sensibilidad")
plt.plot(kernel_size_array, accuracy_score_array, label="Precisión")
plt.plot(kernel_size_array, auc_score_array, label="Area Bajo Curva")
plt.plot(kernel_size_array, precision, label="Especificidad")
plt.ylabel("Tasa")
plt.xlabel(mapa_etiquetas[swap_type])
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(os.path.join(path_evaluation_images_folder, "{}.png".format(string_ref)),
bbox_inches="tight")
| gpl-2.0 |
bigdataelephants/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
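# consensus_score compares the biclusters found by the model with the ground
# truth; a score of 1.0 indicates a perfect match.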
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
sghosh1991/distalgo | benchmarks/plot.py | 2 | 44666 | import sys
import os.path
import json
from collections import namedtuple
from itertools import chain
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def avg(iterable):
return sum(iterable) / len(iterable)
# ==================================================
class DataSet:
def __init__(self, target, args=None):
self.target = target
self.args = args
@property
def data_file(self):
return self.target.replace('/', '_')
@property
def run_profile(self):
return [self.target] + list(self.args)
def __iter__(self):
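        # Yield (x-axis value, concrete argument list) pairs by substituting
        # each value of the single iterable argument found in run_profile.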
arglist = self.run_profile
vaidx = vararg = None
for idx, arg in enumerate(arglist):
if not isinstance(arg, str) and hasattr(arg, '__iter__'):
vaidx = idx
vararg = arg
break
assert vararg is not None
for val in vararg:
arglist[vaidx] = str(val)
yield val, list(arglist)
class CompilerDataSet(DataSet):
@property
def run_profile(self):
return ['dac', '-B', self.target]
@property
def data_file(self):
return 'dac'
def __iter__(self):
arglist = self.run_profile
vaidx = vararg = None
for idx, arg in enumerate(arglist):
if not isinstance(arg, str) and hasattr(arg, '__iter__'):
vaidx = idx
vararg = arg
break
assert vararg is not None
for i, val in enumerate(vararg):
arglist[vaidx] = str(val)
yield i, list(arglist)
class CompilerIncDataSet(CompilerDataSet):
@property
def run_profile(self):
return ['dac', '-B', '-i', self.target]
@property
def data_file(self):
return 'dac_i'
class DADataSet(DataSet):
def __init__(self, program, inc_module, args):
super().__init__(target=program, args=args)
self.inc_module = inc_module
@property
def run_profile(self):
prof = ['dar', '-i']
if self.inc_module is not None:
prof += ['-m', self.inc_module]
prof.append(self.target)
prof += self.args
return prof
class DALoopDataSet(DataSet):
@property
def run_profile(self):
return ['dar', self.target] + list(self.args)
class CDataSet(DataSet): pass
class PyDataSet(DataSet):
@property
def run_profile(self):
return ['python3', self.target] + list(self.args)
class ErlDataSet(DataSet):
@property
def run_profile(self):
path_components = self.target.split(os.sep)
mod = path_components[-1]
indir = os.sep.join(path_components[:-1])
return ['erl', '-noshell', '-pa', indir, '-run', mod, 'start'] + \
list(self.args)
# ==================================================
class GraphLine(namedtuple("_GraphLine",
"dataset, key, aggregate, avg_over_procs, \
avg_factor, \
label, fit_degree, \
linestyle, color, marker, markersize, \
markeredgecolor, markeredgewidth, markerfacecolor")):
def __new__(cls, dataset, key, aggregate=avg, avg_over_procs=False,
avg_factor=1,
label='No Legend', fit_degree=1,
linestyle='-', color=None, marker=None, markersize=9,
markeredgecolor=None, markeredgewidth=0, markerfacecolor=None):
return super().__new__(cls, dataset, key, aggregate, avg_over_procs,
avg_factor,
label, fit_degree,
linestyle, color, marker, markersize,
markeredgecolor, markeredgewidth, markerfacecolor)
@property
def __dict__(self):
return super().__dict__
non_visual_properties = {'dataset', 'key', 'aggregate',
'avg_over_procs', 'avg_factor', 'fit_degree'}
@property
def line_properties(self):
res = super().__dict__
for prop in GraphLine.non_visual_properties:
res.pop(prop)
return res
class GraphBar(namedtuple("_GraphBar",
"dataset, key, aggregate, \
avg_factor, bottom, \
label, color \
width, offset")):
def __new__(cls, dataset, key, aggregate=min,
avg_factor=1, bottom=None,
label='No Legend', color=None,
width=0.5, offset=0):
return super().__new__(cls, dataset, key, aggregate,
avg_factor, bottom,
label, color, width, offset)
@property
def __dict__(self):
return super().__dict__
non_visual_properties = {'dataset', 'key', 'aggregate', 'avg_factor',
'bottom', 'offset'}
@property
def bar_properties(self):
res = super().__dict__
for prop in GraphBar.non_visual_properties:
res.pop(prop)
return res
class GraphInfo(namedtuple("_GraphInfo",
"title, lines, bars, bars2, \
xlabel, xlim, xticks, xticklabels, \
xticklabel_rotation, \
ylabel, ylim, yticks, \
ylabel2, ylim2, yticks2, \
legend_position, show_grid")):
def __new__(cls, title, lines=(), bars=(), bars2=(),
xlabel='', xlim=(None, None), xticks=None, xticklabels=None,
xticklabel_rotation=45,
ylabel='', ylim=(0, None), yticks=None,
ylabel2='', ylim2=(0, None), yticks2=None,
legend_position="upper left", show_grid=False):
return super().__new__(cls, title, lines, bars, bars2,
xlabel, xlim, xticks, xticklabels,
xticklabel_rotation,
ylabel, ylim, yticks,
ylabel2, ylim2, yticks2,
legend_position, show_grid)
DataDir = "results/"
CompileTargets = [
("../examples/2pcommit/orig.da" , '2P Commit'),
("../examples/clpaxos/spec.da" , 'Byz Paxos'),
("../examples/crleader/orig.da" , 'CR Leader'),
("../examples/dscrash/orig.da" , 'DS Crash'),
("../examples/hsleader/orig.da" , 'HS Leader'),
("../examples/lamutex/orig.da" , 'LA Mutex'),
("../examples/lapaxos/orig.da" , 'LA Paxos'),
("../examples/raft/orig.da" , 'Raft'),
("../examples/ramutex/orig.da" , 'RA Mutex'),
("../examples/ratoken/spec.da" , 'RA Token'),
("../examples/sktoken/orig.da" , 'SK Token'),
("../examples/vrpaxos/orig.da" , 'VR Paxos'),
]
Arg_lamutex_vary_rounds = ('10', range(100, 1000+1, 100))
Arg_lamutex_vary_rounds_low = ('5', range(10, 100+1, 10))
Arg_lamutex_vary_procs_low = (range(5, 20+1, 2), '5')
Arg_lamutex_vary_procs_high = (range(15, 150+1, 20), '5')
Arg_lamutex_vary_procs_oopsla = (range(25, 150+1, 25), '5')
Arg_lamutex_vary_procs_erlang = (range(25, 150+1, 25), '30')
Arg_clpaxos_vary_procs_oopsla = (10, range(25, 150+1, 25))
Arg_lamutex_vary_procs_all = ([15, 25, 35, 50, 55, 75, 95, 100, 115, 125,
135, 150], '5')
Arg_tpcommit_vary_procs_low = (range(5, 20+1, 2), '0')
Arg_tpcommit_vary_procs = (range(25, 150+1, 25), '0')
Arg_lapaxos_vary_procs = ('5', range(25, 150+1, 25))
DataSet_compile = CompilerDataSet(
target=[fn for fn, _ in CompileTargets])
DataSet_compile_inc = CompilerIncDataSet(
target=[fn for fn, _ in CompileTargets])
DataSet_lamutex_orig_vary_rounds = DADataSet(
program="lamutex/orig.da",
inc_module=None,
args=Arg_lamutex_vary_rounds)
DataSet_lamutex_orig_inc_vary_rounds = DADataSet(
program="lamutex/orig.da",
inc_module="lamutex_orig_inc_inc",
args=Arg_lamutex_vary_rounds)
DataSet_lamutex_orig_invts_vary_rounds = DADataSet(
program="lamutex/orig.da",
inc_module="orig_inc_invts",
args=Arg_lamutex_vary_rounds)
DataSet_lamutex_orig_loop_vary_rounds = DALoopDataSet(
target="lamutex/orig2.da",
args=Arg_lamutex_vary_rounds)
DataSet_lamutex_orig_vary_procs = DADataSet(
program="lamutex/orig.da",
inc_module=None,
args=Arg_lamutex_vary_procs_oopsla)
DataSet_lamutex_orig_inc_vary_procs = DADataSet(
program="lamutex/orig.da",
inc_module="lamutex_orig_inc_inc",
args=Arg_lamutex_vary_procs_oopsla)
DataSet_lamutex_orig_invts_vary_procs = DADataSet(
program="lamutex/orig.da",
inc_module="orig_inc_invts",
args=Arg_lamutex_vary_procs_oopsla)
DataSet_lamutex_orig_loop_vary_procs = DALoopDataSet(
target="lamutex/orig2.da",
args=Arg_lamutex_vary_procs_oopsla)
# ==================================================
DataSet_lamutex_C_vary_procs = CDataSet(
target="lamutex/C/lamport",
args=Arg_lamutex_vary_procs_erlang)
DataSet_lamutex_C_vary_rounds = CDataSet(
target="lamutex/C/lamport",
args=Arg_lamutex_vary_rounds)
# ==================================================
DataSet_lamutex_erlang_vary_procs = ErlDataSet(
target="lamutex/Erlang/lamutex",
args=Arg_lamutex_vary_procs_erlang)
DataSet_lamutex_erlang_vary_rounds = ErlDataSet(
target="lamutex/Erlang/lamutex",
args=Arg_lamutex_vary_rounds)
# ==================================================
DataSet_lamutex_python_vary_rounds_low = PyDataSet(
target="lamutex/Python/lamutex.py",
args=Arg_lamutex_vary_rounds_low)
DataSet_lamutex_python_vary_rounds = PyDataSet(
target="lamutex/Python/lamutex.py",
args=Arg_lamutex_vary_rounds)
DataSet_lamutex_python_vary_procs_low = PyDataSet(
target="lamutex/Python/lamutex.py",
args=Arg_lamutex_vary_procs_low)
DataSet_lamutex_python_vary_procs_high = PyDataSet(
target="lamutex/Python/lamutex.py",
args=Arg_lamutex_vary_procs_high)
DataSet_lamutex_python_vary_procs_oopsla = PyDataSet(
target="lamutex/Python/lamutex.py",
args=Arg_lamutex_vary_procs_oopsla)
# ==================================================
DataSet_lamutex_spec_inc_vary_rounds_low = DADataSet(
program="lamutex/spec.da",
inc_module="spec_inc_inc",
args=Arg_lamutex_vary_rounds_low)
DataSet_lamutex_spec_inc_vary_rounds = DADataSet(
program="lamutex/spec.da",
inc_module="spec_inc_inc",
args=Arg_lamutex_vary_rounds)
DataSet_lamutex_spec_vary_rounds_low = DADataSet(
program="lamutex/spec.da",
inc_module=None,
args=Arg_lamutex_vary_rounds_low)
DataSet_lamutex_spec_vary_procs_low = DADataSet(
program="lamutex/spec.da",
inc_module=None,
args=Arg_lamutex_vary_procs_low)
DataSet_lamutex_spec_inc_vary_procs_low = DADataSet(
program="lamutex/spec.da",
inc_module="spec_inc_inc",
args=Arg_lamutex_vary_procs_low)
DataSet_lamutex_spec_vary_procs_high = DADataSet(
program="lamutex/spec.da",
inc_module=None,
args=Arg_lamutex_vary_procs_high)
DataSet_lamutex_spec_inc_vary_procs_high = DADataSet(
program="lamutex/spec.da",
inc_module="spec_inc_inc",
args=Arg_lamutex_vary_procs_high)
DataSet_lamutex_spec_inc_vary_procs_oopsla = DADataSet(
program="lamutex/spec.da",
inc_module="spec_inc_inc",
args=Arg_lamutex_vary_procs_all)
DataSet_clpaxos_spec_vary_procs_low = DADataSet(
program="clpaxos/spec.da",
inc_module=None,
args=Arg_lamutex_vary_procs_low)
DataSet_clpaxos_oopsla_vary_procs = DADataSet(
program="clpaxos/oopsla.da",
inc_module=None,
args=('10', [25, 50, 75]))
DataSet_clpaxos_oopsla_inc_vary_procs = DADataSet(
program="clpaxos/oopsla.da",
inc_module="oopsla_inc_inc",
args=('10', [25, 50, 75]))
DataSet_clpaxos_spec_inc_vary_procs_low = DADataSet(
program="clpaxos/spec.da",
inc_module="clpaxos_inc_inc",
args=Arg_lamutex_vary_procs_low)
DataSet_tpcommit_spec_vary_procs_low = DADataSet(
program="2pcommit/spec.da",
inc_module=None,
args=Arg_tpcommit_vary_procs_low)
DataSet_tpcommit_spec_inc_vary_procs_low = DADataSet(
program="2pcommit/spec.da",
inc_module="tpcommit_inc_inc",
args=Arg_tpcommit_vary_procs_low)
DataSet_tpcommit_spec_vary_procs = DADataSet(
program="2pcommit/spec.da",
inc_module=None,
args=Arg_tpcommit_vary_procs)
DataSet_tpcommit_spec_inc_vary_procs = DADataSet(
program="2pcommit/spec.da",
inc_module="tpcommit_inc_inc",
args=Arg_tpcommit_vary_procs)
DataSet_tpcommit_loop_vary_procs = DALoopDataSet(
target="2pcommit/spec2.da",
args=Arg_tpcommit_vary_procs)
DataSet_lapaxos_spec_vary_procs = DADataSet(
program="lapaxos/orig.da",
inc_module=None,
args=Arg_lapaxos_vary_procs)
DataSet_lapaxos_spec_inc_vary_procs = DADataSet(
program="lapaxos/orig.da",
inc_module="lapaxos_inc_inc",
args=Arg_lapaxos_vary_procs)
DataSet_lapaxos_loop_vary_procs = DALoopDataSet(
target="lapaxos/orig2.da",
args=Arg_lapaxos_vary_procs)
Graph_lamutex_orig_running_time_vary_rounds = \
GraphInfo(
title="La Mutex 10 processes CPU time vary rounds",
xlabel="Number of times entered CS",
ylabel="CPU time (in seconds)",
ylim=(0, 25),
lines=(
GraphLine(
dataset=DataSet_lamutex_orig_vary_rounds,
key='Total_process_time',
linestyle='-',
color='y',
marker='s',
fit_degree=2,
label='Orig w/Query',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_loop_vary_rounds,
key='Total_process_time',
linestyle='-.',
color='k',
marker='*',
fit_degree=2,
label='Orig w/Loop',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_inc_vary_rounds,
key='Total_process_time',
linestyle='--',
color='m',
marker='p',
label="IncOQ",
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_invts_vary_rounds,
key='Total_process_time',
linestyle='-.',
color='y',
marker='x',
markeredgewidth=1,
label="InvTS",
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_python_vary_rounds,
key='Total_process_time',
linestyle='-',
color='c',
marker='^',
label='Python',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_erlang_vary_rounds,
key='Total_process_time',
linestyle='--',
color='b',
marker='D',
label='Erlang',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_C_vary_rounds,
key='Total_process_time',
linestyle='-',
color='r',
marker='*',
label="C",
avg_over_procs=True)
))
Graph_lamutex_orig_running_time_vary_procs = \
GraphInfo(
title="La Mutex CPU time vary procs",
xlabel="Number of processes",
ylabel="CPU time (in seconds)",
xlim=(25, 150),
ylim=(0.0, 0.35),
xticks=np.arange(25, 150+1, 25),
lines=(
GraphLine(
dataset=DataSet_lamutex_orig_vary_procs,
key='Total_process_time',
linestyle='-',
color='y',
marker='x',
markeredgewidth=1,
fit_degree=2,
avg_factor=5,
label='Orig w/Query',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_loop_vary_procs,
key='Total_process_time',
linestyle='-.',
color='k',
marker='*',
markeredgewidth=1,
fit_degree=2,
avg_factor=5,
label='Orig w/Loop',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_inc_vary_procs,
key='Total_process_time',
linestyle='--',
color='m',
marker='p',
avg_factor=5,
label="IncOQ",
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_invts_vary_procs,
key='Total_process_time',
linestyle='-',
color='g',
marker='^',
avg_factor=5,
label="InvTS",
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_python_vary_procs_oopsla,
key='Total_process_time',
linestyle='--',
color='c',
marker='D',
avg_factor=5,
label='Python',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_erlang_vary_procs,
key='Total_process_time',
linestyle='--',
color='b',
marker='s',
avg_factor=30,
label='Erlang',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_C_vary_procs,
key='Total_process_time',
linestyle='-.',
color='r',
marker='*',
avg_factor=30,
label="C",
avg_over_procs=True)
))
Graph_lamutex_orig_memory_vary_procs = \
GraphInfo(
title="La Mutex memory vary procs",
xlabel="Number of processes",
ylabel="Avg. Process Peak RSS (in kB)",
xlim=(25, 150),
# ylim=(0, 2200000),
# yticks=np.arange(0, 2200001, 220000*2),
xticks=np.arange(25, 150+1, 25),
legend_position='outside',
lines=(
GraphLine(
dataset=DataSet_lamutex_orig_vary_procs,
key='Total_memory',
linestyle='-',
color='y',
marker='s',
label='Orig\n(w/Query)',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_loop_vary_procs,
key='Total_memory',
linestyle='-.',
color='k',
marker='o',
label='Orig\n(w/Loop)',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_python_vary_procs_oopsla,
key='Total_memory',
linestyle='-',
color='c',
marker='s',
label='Python',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_inc_vary_procs,
key='Total_memory',
linestyle='--',
color='m',
marker='p',
label="IncOQ",
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_invts_vary_procs,
key='Total_memory',
linestyle='--',
color='r',
marker='x',
markeredgewidth=1,
label="InvTS",
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_erlang_vary_procs,
key='Total_memory',
linestyle='-',
color='b',
marker='s',
label='Erlang',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_C_vary_procs,
key='Total_memory',
linestyle='-',
color='r',
marker='*',
label="C",
avg_over_procs=True)
))
Graph_lamutex_orig_memory_vary_rounds = \
GraphInfo(
title="La Mutex 10 processes memory vary rounds",
xlabel="Number of times entered cs",
ylabel="Avg. Process Peak RSS (in kB)",
legend_position='outside',
lines=(
GraphLine(
dataset=DataSet_lamutex_orig_vary_rounds,
key='Total_memory',
linestyle='-',
color='y',
markeredgewidth=1,
marker='x',
label='Orig\n(w/Query)',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_loop_vary_rounds,
key='Total_memory',
linestyle='-.',
color='k',
markeredgewidth=1,
marker='*',
label='Orig\n(w/Loop)',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_inc_vary_rounds,
key='Total_memory',
linestyle='--',
color='m',
marker='p',
label="IncOQ",
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_orig_invts_vary_rounds,
key='Total_memory',
linestyle='--',
color='g',
marker='s',
label="InvTS",
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_python_vary_rounds,
key='Total_memory',
linestyle='-',
color='c',
marker='^',
label='Python',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_erlang_vary_rounds,
key='Total_memory',
linestyle='--',
color='b',
marker='D',
label='Erlang',
avg_over_procs=True),
GraphLine(
dataset=DataSet_lamutex_C_vary_rounds,
key='Total_memory',
linestyle='-',
color='r',
marker='*',
label="C",
avg_over_procs=True)
))
Graph_clpaxos_spec_running_time_vary_processes_low = \
GraphInfo(
title="Clpaxos_spec running time vary processes (low)",
xlabel="Number of processes",
ylabel="Running time (in seconds)",
lines=(
GraphLine(
dataset=DataSet_clpaxos_spec_vary_procs_low,
key='Wallclock_time',
linestyle='-',
color='b',
marker='o',
label='original (wall-clock time)',
fit_degree=2,
avg_over_procs=False),
GraphLine(
dataset=DataSet_clpaxos_spec_inc_vary_procs_low,
key='Wallclock_time',
linestyle='--',
color='g',
marker='v',
label="incremental (wall-clock time)",
avg_over_procs=False),
GraphLine(
dataset=DataSet_clpaxos_spec_vary_procs_low,
key='Total_process_time',
linestyle='-',
color='y',
marker='s',
fit_degree=2,
label='original (total process time)',
avg_over_procs=False),
GraphLine(
dataset=DataSet_clpaxos_spec_inc_vary_procs_low,
key='Total_process_time',
linestyle='--',
color='m',
marker='p',
label="incremental (total process time)",
avg_over_procs=False)))
Graph_clpaxos_oopsla_running_time_vary_processes = \
GraphInfo(
title="Clpaxos_oopsla running time vary processes",
xlabel="Number of processes",
ylabel="Running time (in seconds)",
lines=(
# GraphLine(
# dataset=DataSet_clpaxos_oopsla_vary_procs,
# key='Wallclock_time',
# linestyle='-',
# color='b',
# marker='o',
# label='original (wall-clock time)',
# fit_degree=2,
# avg_over_procs=False),
# GraphLine(
# dataset=DataSet_clpaxos_oopsla_inc_vary_procs,
# key='Wallclock_time',
# linestyle='--',
# color='g',
# marker='v',
# label="incremental (wall-clock time)",
# avg_over_procs=False),
GraphLine(
dataset=DataSet_clpaxos_oopsla_vary_procs,
key='Total_process_time',
linestyle='-',
color='y',
marker='s',
fit_degree=2,
label='original (total process time)',
avg_over_procs=True),
GraphLine(
dataset=DataSet_clpaxos_oopsla_inc_vary_procs,
key='Total_process_time',
linestyle='--',
color='m',
marker='p',
label="incremental (total process time)",
avg_over_procs=True)))
Graph_tpcommit_wallclock_time_vary_processes = \
GraphInfo(
title="2pcommit running time (wall-clock) vary processes",
xlabel="Number of processes",
ylabel="Running time (in seconds)",
xlim=(25, 150+1),
xticks=np.arange(25, 150+1, 25),
lines=(
GraphLine(
dataset=DataSet_tpcommit_spec_vary_procs,
key='Wallclock_time',
aggregate=min,
linestyle='-',
color='b',
marker='o',
label='w/Query (wall-clock time)',
fit_degree=2,
avg_over_procs=False),
GraphLine(
dataset=DataSet_tpcommit_spec_inc_vary_procs,
key='Wallclock_time',
aggregate=min,
linestyle='--',
color='g',
marker='v',
label="IncOQ (wall-clock time)",
avg_over_procs=False),
GraphLine(
dataset=DataSet_tpcommit_loop_vary_procs,
key='Wallclock_time',
aggregate=min,
linestyle='-.',
color='k',
marker='o',
label='w/Loop (wall-clock time)',
fit_degree=2,
avg_over_procs=False)))
Graph_tpcommit_CPU_time_vary_processes = \
GraphInfo(
title="2pcommit Coordinator CPU time vary Cohorts",
xlabel="Number of Cohorts",
ylabel="Running time (in seconds)",
xlim=(25, 150+1),
xticks=np.arange(25, 150+1, 25),
lines=(
GraphLine(
dataset=DataSet_tpcommit_spec_vary_procs,
key='Total_process_time',
linestyle='-',
color='y',
marker='s',
fit_degree=2,
label='w/Query (CPU time)',
avg_over_procs=False),
GraphLine(
dataset=DataSet_tpcommit_loop_vary_procs,
key='Total_process_time',
linestyle='-.',
color='k',
marker='s',
fit_degree=2,
label='w/Loop (CPU time)',
avg_over_procs=False),
GraphLine(
dataset=DataSet_tpcommit_spec_inc_vary_procs,
key='Total_process_time',
linestyle='--',
color='m',
marker='p',
label="IncOQ (CPU time)",
avg_over_procs=False)))
Graph_tpcommit_memory_vary_processes = \
GraphInfo(
title="2pcommit Coordinator memory vary Cohorts",
xlabel="Number of Cohorts",
ylabel="Total memory (in kB)",
ylim=(11000, 16000+1),
xlim=(25, 150+1),
xticks=np.arange(25, 150+1, 25),
lines=(
GraphLine(
dataset=DataSet_tpcommit_spec_vary_procs,
key='Total_memory',
linestyle='-',
color='y',
marker='s',
label='w/Query (Memory)',
avg_over_procs=False),
GraphLine(
dataset=DataSet_tpcommit_loop_vary_procs,
key='Total_memory',
linestyle='-.',
color='k',
marker='s',
label='w/Loop (Memory)',
avg_over_procs=False),
GraphLine(
dataset=DataSet_tpcommit_spec_inc_vary_procs,
key='Total_memory',
linestyle='--',
color='m',
marker='p',
label="IncOQ (Memory)",
avg_over_procs=False)))
Graph_lapaxos_CPU_time_vary_processes = \
GraphInfo(
title="La Paxos CPU time vary processes",
xlabel="Number of processes",
ylabel="Running time (in seconds)",
xlim=(25, 150+1),
xticks=np.arange(25, 150+1, 25),
lines=(
GraphLine(
dataset=DataSet_lapaxos_spec_vary_procs,
key='Total_process_time',
linestyle='-',
color='y',
marker='s',
fit_degree=2,
label='w/Query (CPU time)',
avg_over_procs=False),
GraphLine(
dataset=DataSet_lapaxos_loop_vary_procs,
key='Total_process_time',
linestyle='-.',
color='k',
marker='s',
label='w/Loop (CPU time)',
avg_over_procs=False),
GraphLine(
dataset=DataSet_lapaxos_spec_inc_vary_procs,
key='Total_process_time',
linestyle='--',
color='m',
marker='p',
label="IncOQ (CPU time)",
avg_over_procs=False)))
Graph_lapaxos_memory_vary_processes = \
GraphInfo(
title="La Paxos memory vary processes",
xlabel="Number of processes",
ylabel="Total memory (in kB)",
xlim=(25, 150+1),
xticks=np.arange(25, 150+1, 25),
lines=(
GraphLine(
dataset=DataSet_lapaxos_spec_vary_procs,
key='Total_memory',
linestyle='-',
color='y',
marker='s',
label='w/Query (Memory)',
avg_over_procs=False),
GraphLine(
dataset=DataSet_lapaxos_loop_vary_procs,
key='Total_memory',
linestyle='-.',
color='k',
marker='s',
label='w/Loop (Memory)',
avg_over_procs=False),
GraphLine(
dataset=DataSet_lapaxos_spec_inc_vary_procs,
key='Total_memory',
linestyle='--',
color='m',
marker='p',
label="IncOQ (Memory)",
avg_over_procs=False)))
# Graph_lamutex_spec_running_time_vary_processes_high = \
# GraphInfo(
# title="Lamutex_spec running time vary processes (high)",
# xlabel="Number of processes",
# ylabel="Running time (in seconds)",
# lines=(
# GraphLine(
# dataset=DataSet_lamutex_spec_vary_procs_high,
# key='Wallclock_time',
# linestyle='-',
# color='b',
# marker='o',
# label='original (wall-clock time)',
# fit_degree=2,
# avg_over_procs=False),
# GraphLine(
# dataset=DataSet_lamutex_spec_inc_vary_procs_high,
# key='Wallclock_time',
# linestyle='--',
# color='g',
# marker='v',
# fit_degree=1,
# label="incremental (wall-clock time)",
# avg_over_procs=False),
# GraphLine(
# dataset=DataSet_lamutex_spec_vary_procs_high,
# key='Total_process_time',
# linestyle='-',
# color='y',
# marker='s',
# fit_degree=4,
# label='original (total process time)',
# avg_over_procs=False),
# GraphLine(
# dataset=DataSet_lamutex_spec_inc_vary_procs_high,
# key='Total_process_time',
# linestyle='--',
# color='m',
# marker='p',
# fit_degree=1,
# label="incremental (total process time)",
# avg_over_procs=False)))
# Graph_lamutex_spec_memory_vary_processes_low = \
# GraphInfo(
# title="Lamutex_spec memory vary processes (low)",
# xlabel="Number of processes",
# ylabel="Total memory (in bytes)",
# lines=(
# GraphLine(
# dataset=DataSet_lamutex_spec_vary_procs_low,
# key='Total_memory',
# linestyle='-',
# color='b',
# marker='o',
# label='original (total memory)',
# avg_over_procs=False),
# GraphLine(
# dataset=DataSet_lamutex_spec_inc_vary_procs_low,
# key='Total_memory',
# linestyle='--',
# color='g',
# marker='v',
# label="incremental (total memory)",
# avg_over_procs=False)))
# Graph_lamutex_spec_memory_vary_processes_high = \
# GraphInfo(
# title="Lamutex_spec memory vary processes (high)",
# xlabel="Number of processes",
# ylabel="Total memory (in bytes)",
# lines=(
# GraphLine(
# dataset=DataSet_lamutex_spec_vary_procs_high,
# key='Total_memory',
# linestyle='-',
# color='b',
# marker='o',
# label='original (total memory)',
# avg_over_procs=False),
# GraphLine(
# dataset=DataSet_lamutex_spec_inc_vary_procs_high,
# key='Total_memory',
# linestyle='--',
# color='g',
# marker='v',
# label="incremental (total memory)",
# avg_over_procs=False)))
Graph_clpaxos_spec_memory_vary_processes_low = \
GraphInfo(
title="Clpaxos_spec memory vary processes (low)",
xlabel="Number of processes",
ylabel="Total memory (in bytes)",
lines=(
GraphLine(
dataset=DataSet_clpaxos_spec_vary_procs_low,
key='Total_memory',
linestyle='-',
color='b',
marker='o',
label='original (total memory)',
avg_over_procs=False),
GraphLine(
dataset=DataSet_clpaxos_spec_inc_vary_procs_low,
key='Total_memory',
linestyle='--',
color='g',
marker='v',
label="incremental (total memory)",
avg_over_procs=False)))
Graph_clpaxos_oopsla_memory_vary_processes = \
GraphInfo(
title="Clpaxos_oopsla memory vary processes (low)",
xlabel="Number of processes",
ylabel="Total memory (in bytes)",
lines=(
GraphLine(
dataset=DataSet_clpaxos_oopsla_vary_procs,
key='Total_memory',
linestyle='-',
color='b',
marker='o',
label='original (total memory)',
avg_over_procs=True),
GraphLine(
dataset=DataSet_clpaxos_oopsla_inc_vary_procs,
key='Total_memory',
linestyle='--',
color='g',
marker='v',
label="incremental (total memory)",
avg_over_procs=True)))
BarWidth=0.35
Graph_compile = \
GraphInfo(
title="Compilation times",
xlabel='',
xticks=np.arange(len(CompileTargets))+BarWidth,
xticklabels=[label for _, label in CompileTargets],
show_grid=True,
ylabel='Compilation time (in seconds)',
ylim=(0, 0.06),
yticks=np.arange(0, 0.061, 0.06/10),
bars=(
GraphBar(
dataset=DataSet_compile,
key='Wallclock_time',
aggregate=min,
width=BarWidth,
color='y',
label='Compilation time'),),
ylabel2='I/O (in bytes)',
ylim2=(0, 30000+1),
yticks2=np.arange(0, 30000+1, 30000/10),
bars2=(
GraphBar(
dataset=DataSet_compile,
key='Input_size',
aggregate=min,
offset=BarWidth,
width=BarWidth,
color='g',
label='Input size'),
GraphBar(
dataset=DataSet_compile,
key='Output_size',
aggregate=min,
offset=BarWidth,
width=BarWidth,
color='r',
bottom=GraphBar(
dataset=DataSet_compile,
key='Input_size',
aggregate=min),
label='Output size')
))
Graph_compile_inc = \
GraphInfo(
title="Compilation times (with inc)",
show_grid=True,
xlabel='',
xticks=np.arange(len(CompileTargets))+BarWidth,
xticklabels=[label for _, label in CompileTargets],
ylabel='Compilation time (in seconds)',
ylim=(0, 0.12),
yticks=np.arange(0, 0.121, 0.12/10),
bars=(
GraphBar(
dataset=DataSet_compile_inc,
key='Wallclock_time',
aggregate=min,
width=BarWidth,
color='y',
label='Compilation time'),),
ylabel2='I/O Volume (in bytes)',
ylim2=(0, 50000+1),
yticks2=np.arange(0, 50000+1, 5000),
bars2=(
GraphBar(
dataset=DataSet_compile_inc,
key='Input_size',
aggregate=min,
offset=BarWidth,
width=BarWidth,
color='g',
label='Input size'),
GraphBar(
dataset=DataSet_compile_inc,
key='Output_size',
aggregate=min,
offset=BarWidth,
width=BarWidth,
color='r',
bottom=GraphBar(
dataset=DataSet_compile_inc,
key='Input_size',
aggregate=min),
label='Output size')
))
def load_data(fromfile):
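    # Load benchmark results (a list of [config, data, timestamp] entries)
    # from the JSON file named after the dataset inside DataDir.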
realfile = os.path.join(DataDir, fromfile + ".json")
with open(realfile, "r") as infd:
results = json.load(infd)
assert results is not None and isinstance(results, list)
return results
def load_bardata(graphbar):
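    # Collect (x position, aggregated y value) pairs for one bar series,
    # applying the bar's offset so grouped bars sit side by side.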
datafile = graphbar.dataset.data_file
results = load_data(datafile)
xset = []
yset = []
for xaxis, datapoint in graphbar.dataset:
datas = [data[graphbar.key]
for config, data, ts in results if config == datapoint]
if len(datas) == 0:
print("No data for ", graphbar.key, datapoint)
xset.append(xaxis + graphbar.offset)
yset.append(graphbar.aggregate(datas))
return xset, yset
def load_graphbar(graphbar, ax=plt):
"""Plot a bar on the graph, return its handle."""
xset, yset = load_bardata(graphbar)
if graphbar.bottom is not None:
_, ybot = load_bardata(graphbar.bottom)
return ax.bar(xset, yset, bottom=ybot, **graphbar.bar_properties)
else:
return ax.bar(xset, yset, **graphbar.bar_properties)
def load_graphline(graphline, ax=plt):
"""Plot a line on the graph, return its handle."""
datafile = graphline.dataset.data_file
results = load_data(datafile)
xset = []
yset = []
for xaxis, datapoint in graphline.dataset:
datas = [(data[graphline.key] if not graphline.avg_over_procs
else data[graphline.key] / data['Total_processes'])
/ graphline.avg_factor
for config, data, ts in results if config == datapoint]
if len(datas) == 0:
print("No data for ", graphline.key, datapoint)
xset.append(xaxis)
yset.append(graphline.aggregate(datas))
if graphline.fit_degree is not None:
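        # Fit a polynomial of the requested degree to the measurements, draw
        # it as the styled line, and plot the raw data points separately as
        # unconnected markers; the returned Line2D is only used for the
        # legend entry.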
pol = np.polyfit(xset, yset, graphline.fit_degree)
sample_xs = np.linspace(xset[0], xset[-1], len(xset) * 10)
fitline_args = graphline.line_properties
datapoint_args = graphline.line_properties
fitline_args.pop('marker')
datapoint_args.pop('linestyle')
datapoint_args.pop('label')
ax.plot(sample_xs, np.polyval(pol, sample_xs), **fitline_args)
ax.plot(xset, yset, label='_nolegend_', linestyle="", **datapoint_args)
return Line2D([0, 1], [0, 1], **graphline.line_properties)
else:
return ax.plot(xset, yset, **graphline.line_properties)
def plot_graph(graph):
plt.clf()
plt.title(graph.title)
plt.xlabel(graph.xlabel)
plt.ylabel(graph.ylabel)
ax = plt.subplot(111)
ax2 = None
handles = []
handles += [load_graphline(line) for line in graph.lines]
handles += [load_graphbar(bar, ax) for bar in graph.bars]
if len(graph.bars2) > 0:
ax2 = ax.twinx()
ax2.set_ylabel(graph.ylabel2)
ax2.set_ylim(*graph.ylim2)
if graph.yticks2 is not None:
ax2.set_yticks(graph.yticks2)
handles += [load_graphbar(bar, ax2) for bar in graph.bars2]
if graph.xticklabels is not None:
box = ax.get_position()
newpos = [box.x0, box.y0 + box.height * 0.05,
box.width, box.height * 0.95]
ax.set_position(newpos)
if ax2 is not None:
ax2.set_position(newpos)
ax.set_xticklabels(graph.xticklabels,
fontsize=9,
rotation=graph.xticklabel_rotation,
ha='right')
if graph.legend_position == "outside":
box = ax.get_position()
newpos = [box.x0, box.y0, box.width * 0.8, box.height]
ax.set_position(newpos)
if ax2 is not None:
ax2.set_position(newpos)
legend_params = {'frameon': True,
'shadow': True,
'fancybox': True,
'loc' : 'center left',
'bbox_to_anchor' : (1.0, 0.5)}
else:
legend_params = {'frameon' : False,
'loc' : graph.legend_position}
legend_params['numpoints'] = 1
ax.legend(handles=handles, **legend_params)
ax.set_ylim(*graph.ylim)
ax.set_xlim(*graph.xlim)
if graph.yticks is not None:
ax.set_yticks(graph.yticks)
if graph.xticks is not None:
ax.set_xticks(graph.xticks)
if graph.show_grid:
plt.grid()
def show_graph(graph):
plot_graph(graph)
plt.show()
def main():
output_dir = sys.argv[1] if len(sys.argv) > 1 else "graphs"
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if not os.path.isdir(output_dir):
sys.stderr.write("Error: %s is not a directory!" % output_dir)
exit(1)
for graph in [value for name, value in globals().items()
if isinstance(value, GraphInfo) and
name.startswith("Graph")]:
print("Plotting %s..." % graph.title)
plot_graph(graph)
plt.savefig(os.path.join(output_dir,
graph.title.replace(' ', '_') + ".png"))
if __name__ == "__main__":
main()
| mit |
miloharper/neural-network-animation | matplotlib/tests/test_triangulation.py | 9 | 39659 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
from nose.tools import assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal,\
assert_array_less
import numpy.ma.testutils as matest
from matplotlib.testing.decorators import image_comparison
import matplotlib.cm as cm
from matplotlib.path import Path
def test_delaunay():
# No duplicate points, regular grid.
nx = 5
ny = 4
x, y = np.meshgrid(np.linspace(0.0, 1.0, nx), np.linspace(0.0, 1.0, ny))
x = x.ravel()
y = y.ravel()
npoints = nx*ny
ntriangles = 2 * (nx-1) * (ny-1)
nedges = 3*nx*ny - 2*nx - 2*ny + 1
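    # = (nx-1)*ny horizontal + nx*(ny-1) vertical + (nx-1)*(ny-1) diagonal
    # edges of a regular grid triangulated with one diagonal per cell.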
# Create delaunay triangulation.
triang = mtri.Triangulation(x, y)
# The tests in the remainder of this function should be passed by any
# triangulation that does not contain duplicate points.
# Points - floating point.
assert_array_almost_equal(triang.x, x)
assert_array_almost_equal(triang.y, y)
# Triangles - integers.
assert_equal(len(triang.triangles), ntriangles)
assert_equal(np.min(triang.triangles), 0)
assert_equal(np.max(triang.triangles), npoints-1)
# Edges - integers.
assert_equal(len(triang.edges), nedges)
assert_equal(np.min(triang.edges), 0)
assert_equal(np.max(triang.edges), npoints-1)
# Neighbors - integers.
# Check that neighbors calculated by C++ triangulation class are the same
# as those returned from delaunay routine.
neighbors = triang.neighbors
triang._neighbors = None
assert_array_equal(triang.neighbors, neighbors)
# Is each point used in at least one triangle?
assert_array_equal(np.unique(triang.triangles), np.arange(npoints))
def test_delaunay_duplicate_points():
# x[duplicate] == x[duplicate_of]
# y[duplicate] == y[duplicate_of]
npoints = 10
duplicate = 7
duplicate_of = 3
np.random.seed(23)
x = np.random.random((npoints))
y = np.random.random((npoints))
x[duplicate] = x[duplicate_of]
y[duplicate] = y[duplicate_of]
# Create delaunay triangulation.
triang = mtri.Triangulation(x, y)
# Duplicate points should be ignored, so the index of the duplicate points
# should not appear in any triangle.
assert_array_equal(np.unique(triang.triangles),
np.delete(np.arange(npoints), duplicate))
def test_delaunay_points_in_line():
# Cannot triangulate points that are all in a straight line, but check
# that delaunay code fails gracefully.
x = np.linspace(0.0, 10.0, 11)
y = np.linspace(0.0, 10.0, 11)
assert_raises(RuntimeError, mtri.Triangulation, x, y)
# Add an extra point not on the line and the triangulation is OK.
x = np.append(x, 2.0)
y = np.append(y, 8.0)
triang = mtri.Triangulation(x, y)
def test_delaunay_insufficient_points():
# Triangulation should raise a ValueError if passed less than 3 points.
assert_raises(ValueError, mtri.Triangulation, [], [])
assert_raises(ValueError, mtri.Triangulation, [1], [5])
assert_raises(ValueError, mtri.Triangulation, [1, 2], [5, 6])
# Triangulation should also raise a ValueError if passed duplicate points
# such that there are less than 3 unique points.
assert_raises(ValueError, mtri.Triangulation, [1, 2, 1], [5, 6, 5])
assert_raises(ValueError, mtri.Triangulation, [1, 2, 2], [5, 6, 6])
assert_raises(ValueError, mtri.Triangulation, [1, 1, 1, 2, 1, 2],
[5, 5, 5, 6, 5, 6])
def test_delaunay_robust():
# Fails when mtri.Triangulation uses matplotlib.delaunay, works when using
# qhull.
tri_points = np.array([
[0.8660254037844384, -0.5000000000000004],
[0.7577722283113836, -0.5000000000000004],
[0.6495190528383288, -0.5000000000000003],
[0.5412658773652739, -0.5000000000000003],
[0.811898816047911, -0.40625000000000044],
[0.7036456405748561, -0.4062500000000004],
[0.5953924651018013, -0.40625000000000033]])
test_points = np.asarray([
[0.58, -0.46],
[0.65, -0.46],
[0.65, -0.42],
[0.7, -0.48],
[0.7, -0.44],
[0.75, -0.44],
[0.8, -0.48]])
# Utility function that indicates if a triangle defined by 3 points
# (xtri, ytri) contains the test point xy. Avoid calling with a point that
# lies on or very near to an edge of the triangle.
def tri_contains_point(xtri, ytri, xy):
tri_points = np.vstack((xtri, ytri)).T
return Path(tri_points).contains_point(xy)
# Utility function that returns how many triangles of the specified
# triangulation contain the test point xy. Avoid calling with a point that
# lies on or very near to an edge of any triangle in the triangulation.
def tris_contain_point(triang, xy):
count = 0
for tri in triang.triangles:
if tri_contains_point(triang.x[tri], triang.y[tri], xy):
count += 1
return count
# Using matplotlib.delaunay, an invalid triangulation is created with
# overlapping triangles; qhull is OK.
triang = mtri.Triangulation(tri_points[:, 0], tri_points[:, 1])
for test_point in test_points:
assert_equal(tris_contain_point(triang, test_point), 1)
# If ignore the first point of tri_points, matplotlib.delaunay throws a
# KeyError when calculating the convex hull; qhull is OK.
triang = mtri.Triangulation(tri_points[1:, 0], tri_points[1:, 1])
@image_comparison(baseline_images=['tripcolor1'], extensions=['png'])
def test_tripcolor():
x = np.asarray([0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1, 0.75])
y = np.asarray([0, 0, 0, 0.5, 0.5, 0.5, 1, 1, 1, 0.75])
triangles = np.asarray([
[0, 1, 3], [1, 4, 3],
[1, 2, 4], [2, 5, 4],
[3, 4, 6], [4, 7, 6],
[4, 5, 9], [7, 4, 9], [8, 7, 9], [5, 8, 9]])
# Triangulation with same number of points and triangles.
triang = mtri.Triangulation(x, y, triangles)
Cpoints = x + 0.5*y
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
Cfaces = 0.5*xmid + ymid
plt.subplot(121)
plt.tripcolor(triang, Cpoints, edgecolors='k')
plt.title('point colors')
plt.subplot(122)
plt.tripcolor(triang, facecolors=Cfaces, edgecolors='k')
plt.title('facecolors')
def test_no_modify():
# Test that Triangulation does not modify triangles array passed to it.
triangles = np.array([[3, 2, 0], [3, 1, 0]], dtype=np.int32)
points = np.array([(0, 0), (0, 1.1), (1, 0), (1, 1)])
old_triangles = triangles.copy()
tri = mtri.Triangulation(points[:, 0], points[:, 1], triangles)
edges = tri.edges
assert_array_equal(old_triangles, triangles)
def test_trifinder():
# Test points within triangles of masked triangulation.
x, y = np.meshgrid(np.arange(4), np.arange(4))
x = x.ravel()
y = y.ravel()
triangles = [[0, 1, 4], [1, 5, 4], [1, 2, 5], [2, 6, 5], [2, 3, 6],
[3, 7, 6], [4, 5, 8], [5, 9, 8], [5, 6, 9], [6, 10, 9],
[6, 7, 10], [7, 11, 10], [8, 9, 12], [9, 13, 12], [9, 10, 13],
[10, 14, 13], [10, 11, 14], [11, 15, 14]]
mask = np.zeros(len(triangles))
mask[8:10] = 1
triang = mtri.Triangulation(x, y, triangles, mask)
trifinder = triang.get_trifinder()
xs = [0.25, 1.25, 2.25, 3.25]
ys = [0.25, 1.25, 2.25, 3.25]
xs, ys = np.meshgrid(xs, ys)
xs = xs.ravel()
ys = ys.ravel()
tris = trifinder(xs, ys)
assert_array_equal(tris, [0, 2, 4, -1, 6, -1, 10, -1,
12, 14, 16, -1, -1, -1, -1, -1])
tris = trifinder(xs-0.5, ys-0.5)
assert_array_equal(tris, [-1, -1, -1, -1, -1, 1, 3, 5,
-1, 7, -1, 11, -1, 13, 15, 17])
# Test points exactly on boundary edges of masked triangulation.
xs = [0.5, 1.5, 2.5, 0.5, 1.5, 2.5, 1.5, 1.5, 0.0, 1.0, 2.0, 3.0]
ys = [0.0, 0.0, 0.0, 3.0, 3.0, 3.0, 1.0, 2.0, 1.5, 1.5, 1.5, 1.5]
tris = trifinder(xs, ys)
assert_array_equal(tris, [0, 2, 4, 13, 15, 17, 3, 14, 6, 7, 10, 11])
# Test points exactly on boundary corners of masked triangulation.
xs = [0.0, 3.0]
ys = [0.0, 3.0]
tris = trifinder(xs, ys)
assert_array_equal(tris, [0, 17])
# Test triangles with horizontal colinear points. These are not valid
# triangulations, but we try to deal with the simplest violations.
delta = 0.0 # If +ve, triangulation is OK, if -ve triangulation invalid,
# if zero have colinear points but should pass tests anyway.
x = [1.5, 0, 1, 2, 3, 1.5, 1.5]
y = [-1, 0, 0, 0, 0, delta, 1]
triangles = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [1, 2, 5], [2, 3, 5],
[3, 4, 5], [1, 5, 6], [4, 6, 5]]
triang = mtri.Triangulation(x, y, triangles)
trifinder = triang.get_trifinder()
xs = [-0.1, 0.4, 0.9, 1.4, 1.9, 2.4, 2.9]
ys = [-0.1, 0.1]
xs, ys = np.meshgrid(xs, ys)
tris = trifinder(xs, ys)
assert_array_equal(tris, [[-1, 0, 0, 1, 1, 2, -1],
[-1, 6, 6, 6, 7, 7, -1]])
# Test triangles with vertical colinear points. These are not valid
# triangulations, but we try to deal with the simplest violations.
delta = 0.0 # If +ve, triangulation is OK, if -ve triangulation invalid,
# if zero have colinear points but should pass tests anyway.
x = [-1, -delta, 0, 0, 0, 0, 1]
y = [1.5, 1.5, 0, 1, 2, 3, 1.5]
triangles = [[0, 1, 2], [0, 1, 5], [1, 2, 3], [1, 3, 4], [1, 4, 5],
[2, 6, 3], [3, 6, 4], [4, 6, 5]]
triang = mtri.Triangulation(x, y, triangles)
trifinder = triang.get_trifinder()
xs = [-0.1, 0.1]
ys = [-0.1, 0.4, 0.9, 1.4, 1.9, 2.4, 2.9]
xs, ys = np.meshgrid(xs, ys)
tris = trifinder(xs, ys)
assert_array_equal(tris, [[-1, -1], [0, 5], [0, 5], [0, 6], [1, 6], [1, 7],
[-1, -1]])
# Test that changing triangulation by setting a mask causes the trifinder
# to be reinitialised.
x = [0, 1, 0, 1]
y = [0, 0, 1, 1]
triangles = [[0, 1, 2], [1, 3, 2]]
triang = mtri.Triangulation(x, y, triangles)
trifinder = triang.get_trifinder()
xs = [-0.2, 0.2, 0.8, 1.2]
ys = [ 0.5, 0.5, 0.5, 0.5]
tris = trifinder(xs, ys)
assert_array_equal(tris, [-1, 0, 1, -1])
triang.set_mask([1, 0])
assert_equal(trifinder, triang.get_trifinder())
tris = trifinder(xs, ys)
assert_array_equal(tris, [-1, -1, 1, -1])
def test_triinterp():
# Test points within triangles of masked triangulation.
x, y = np.meshgrid(np.arange(4), np.arange(4))
x = x.ravel()
y = y.ravel()
z = 1.23*x - 4.79*y
triangles = [[0, 1, 4], [1, 5, 4], [1, 2, 5], [2, 6, 5], [2, 3, 6],
[3, 7, 6], [4, 5, 8], [5, 9, 8], [5, 6, 9], [6, 10, 9],
[6, 7, 10], [7, 11, 10], [8, 9, 12], [9, 13, 12], [9, 10, 13],
[10, 14, 13], [10, 11, 14], [11, 15, 14]]
mask = np.zeros(len(triangles))
mask[8:10] = 1
triang = mtri.Triangulation(x, y, triangles, mask)
linear_interp = mtri.LinearTriInterpolator(triang, z)
cubic_min_E = mtri.CubicTriInterpolator(triang, z)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
xs = np.linspace(0.25, 2.75, 6)
ys = [0.25, 0.75, 2.25, 2.75]
xs, ys = np.meshgrid(xs, ys) # Testing arrays with array.ndim = 2
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = interp(xs, ys)
assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
# Test points outside triangulation.
xs = [-0.25, 1.25, 1.75, 3.25]
ys = xs
xs, ys = np.meshgrid(xs, ys)
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = linear_interp(xs, ys)
assert_array_equal(zs.mask, [[True]*4]*4)
# Test mixed configuration (outside / inside).
xs = np.linspace(0.25, 1.75, 6)
ys = [0.25, 0.75, 1.25, 1.75]
xs, ys = np.meshgrid(xs, ys)
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = interp(xs, ys)
matest.assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
mask = (xs >= 1) * (xs <= 2) * (ys >= 1) * (ys <= 2)
assert_array_equal(zs.mask, mask)
# 2nd order patch test: on a grid with an 'arbitrary shaped' triangle,
# patch test shall be exact for quadratic functions and cubic
# interpolator if *kind* = user
(a, b, c) = (1.23, -4.79, 0.6)
def quad(x, y):
return a*(x-0.5)**2 + b*(y-0.5)**2 + c*x*y
def gradient_quad(x, y):
return (2*a*(x-0.5) + c*y, 2*b*(y-0.5) + c*x)
x = np.array([0.2, 0.33367, 0.669, 0., 1., 1., 0.])
y = np.array([0.3, 0.80755, 0.4335, 0., 0., 1., 1.])
triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
[1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
triang = mtri.Triangulation(x, y, triangles)
z = quad(x, y)
dz = gradient_quad(x, y)
# test points for 2nd order patch test
xs = np.linspace(0., 1., 5)
ys = np.linspace(0., 1., 5)
xs, ys = np.meshgrid(xs, ys)
cubic_user = mtri.CubicTriInterpolator(triang, z, kind='user', dz=dz)
interp_zs = cubic_user(xs, ys)
assert_array_almost_equal(interp_zs, quad(xs, ys))
(interp_dzsdx, interp_dzsdy) = cubic_user.gradient(x, y)
(dzsdx, dzsdy) = gradient_quad(x, y)
assert_array_almost_equal(interp_dzsdx, dzsdx)
assert_array_almost_equal(interp_dzsdy, dzsdy)
# Cubic improvement: cubic interpolation shall perform better than linear
# on a sufficiently dense mesh for a quadratic function.
n = 11
x, y = np.meshgrid(np.linspace(0., 1., n+1), np.linspace(0., 1., n+1))
x = x.ravel()
y = y.ravel()
z = quad(x, y)
triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1))
xs, ys = np.meshgrid(np.linspace(0.1, 0.9, 5), np.linspace(0.1, 0.9, 5))
xs = xs.ravel()
ys = ys.ravel()
linear_interp = mtri.LinearTriInterpolator(triang, z)
cubic_min_E = mtri.CubicTriInterpolator(triang, z)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
zs = quad(xs, ys)
diff_lin = np.abs(linear_interp(xs, ys) - zs)
for interp in (cubic_min_E, cubic_geom):
diff_cubic = np.abs(interp(xs, ys) - zs)
assert(np.max(diff_lin) >= 10.*np.max(diff_cubic))
assert(np.dot(diff_lin, diff_lin) >=
100.*np.dot(diff_cubic, diff_cubic))
def test_triinterpcubic_C1_continuity():
# Below the 4 tests which demonstrate C1 continuity of the
# TriCubicInterpolator (testing the cubic shape functions on arbitrary
# triangle):
#
# 1) Testing continuity of function & derivatives at corner for all 9
# shape functions. Testing also function values at same location.
# 2) Testing C1 continuity along each edge (as gradient is polynomial of
# 2nd order, it is sufficient to test at the middle).
# 3) Testing C1 continuity at triangle barycenter (where the 3 subtriangles
# meet)
# 4) Testing C1 continuity at median 1/3 points (midside between 2
# subtriangles)
# Utility test function check_continuity
def check_continuity(interpolator, loc, values=None):
"""
Checks the continuity of interpolator (and its derivatives) near
location loc. Can check the value at loc itself if *values* is
provided.
*interpolator* TriInterpolator
*loc* location to test (x0, y0)
*values* (optional) array [z0, dzx0, dzy0] to check the value at *loc*
"""
n_star = 24 # Number of continuity points in a boundary of loc
epsilon = 1.e-10 # Distance for loc boundary
k = 100. # Continuity coefficient
(loc_x, loc_y) = loc
star_x = loc_x + epsilon*np.cos(np.linspace(0., 2*np.pi, n_star))
star_y = loc_y + epsilon*np.sin(np.linspace(0., 2*np.pi, n_star))
z = interpolator([loc_x], [loc_y])[0]
(dzx, dzy) = interpolator.gradient([loc_x], [loc_y])
if values is not None:
assert_array_almost_equal(z, values[0])
assert_array_almost_equal(dzx[0], values[1])
assert_array_almost_equal(dzy[0], values[2])
diff_z = interpolator(star_x, star_y) - z
(tab_dzx, tab_dzy) = interpolator.gradient(star_x, star_y)
diff_dzx = tab_dzx - dzx
diff_dzy = tab_dzy - dzy
assert_array_less(diff_z, epsilon*k)
assert_array_less(diff_dzx, epsilon*k)
assert_array_less(diff_dzy, epsilon*k)
# Drawing arbitrary triangle (a, b, c) inside a unit square.
(ax, ay) = (0.2, 0.3)
(bx, by) = (0.33367, 0.80755)
(cx, cy) = (0.669, 0.4335)
x = np.array([ax, bx, cx, 0., 1., 1., 0.])
y = np.array([ay, by, cy, 0., 0., 1., 1.])
triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
[1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
triang = mtri.Triangulation(x, y, triangles)
for idof in range(9):
z = np.zeros(7, dtype=np.float64)
dzx = np.zeros(7, dtype=np.float64)
dzy = np.zeros(7, dtype=np.float64)
values = np.zeros([3, 3], dtype=np.float64)
case = idof//3
values[case, idof % 3] = 1.0
if case == 0:
z[idof] = 1.0
elif case == 1:
dzx[idof % 3] = 1.0
elif case == 2:
dzy[idof % 3] = 1.0
interp = mtri.CubicTriInterpolator(triang, z, kind='user',
dz=(dzx, dzy))
# Test 1) Checking values and continuity at nodes
check_continuity(interp, (ax, ay), values[:, 0])
check_continuity(interp, (bx, by), values[:, 1])
check_continuity(interp, (cx, cy), values[:, 2])
# Test 2) Checking continuity at midside nodes
check_continuity(interp, ((ax+bx)*0.5, (ay+by)*0.5))
check_continuity(interp, ((ax+cx)*0.5, (ay+cy)*0.5))
check_continuity(interp, ((cx+bx)*0.5, (cy+by)*0.5))
# Test 3) Checking continuity at barycenter
check_continuity(interp, ((ax+bx+cx)/3., (ay+by+cy)/3.))
# Test 4) Checking continuity at median 1/3-point
check_continuity(interp, ((4.*ax+bx+cx)/6., (4.*ay+by+cy)/6.))
check_continuity(interp, ((ax+4.*bx+cx)/6., (ay+4.*by+cy)/6.))
check_continuity(interp, ((ax+bx+4.*cx)/6., (ay+by+4.*cy)/6.))
def test_triinterpcubic_cg_solver():
# Now 3 basic tests of the Sparse CG solver, used for
# TriCubicInterpolator with *kind* = 'min_E'
# 1) A commonly used test involves a 2d Poisson matrix.
def poisson_sparse_matrix(n, m):
"""
Sparse Poisson matrix.
Returns the sparse matrix in coo format resulting from the
discretisation of the 2-dimensional Poisson equation according to a
finite difference numerical scheme on a uniform (n, m) grid.
Size of the matrix: (n*m, n*m)
"""
l = m*n
rows = np.concatenate([
np.arange(l, dtype=np.int32),
np.arange(l-1, dtype=np.int32), np.arange(1, l, dtype=np.int32),
np.arange(l-n, dtype=np.int32), np.arange(n, l, dtype=np.int32)])
cols = np.concatenate([
np.arange(l, dtype=np.int32),
np.arange(1, l, dtype=np.int32), np.arange(l-1, dtype=np.int32),
np.arange(n, l, dtype=np.int32), np.arange(l-n, dtype=np.int32)])
vals = np.concatenate([
4*np.ones(l, dtype=np.float64),
-np.ones(l-1, dtype=np.float64), -np.ones(l-1, dtype=np.float64),
-np.ones(l-n, dtype=np.float64), -np.ones(l-n, dtype=np.float64)])
# In fact +1 and -1 diags have some zeros
vals[l:2*l-1][m-1::m] = 0.
vals[2*l-1:3*l-2][m-1::m] = 0.
return vals, rows, cols, (n*m, n*m)
# Instantiating a sparse Poisson matrix of size 48 x 48:
(n, m) = (12, 4)
mat = mtri.triinterpolate._Sparse_Matrix_coo(*poisson_sparse_matrix(n, m))
mat.compress_csc()
mat_dense = mat.to_dense()
# Testing a sparse solve for all 48 basis vectors
for itest in range(n*m):
b = np.zeros(n*m, dtype=np.float64)
b[itest] = 1.
x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.zeros(n*m),
tol=1.e-10)
assert_array_almost_equal(np.dot(mat_dense, x), b)
# 2) Same matrix with inserting 2 rows - cols with null diag terms
# (but still linked with the rest of the matrix by extra-diag terms)
(i_zero, j_zero) = (12, 49)
vals, rows, cols, _ = poisson_sparse_matrix(n, m)
rows = rows + 1*(rows >= i_zero) + 1*(rows >= j_zero)
cols = cols + 1*(cols >= i_zero) + 1*(cols >= j_zero)
# adding extra-diag terms
rows = np.concatenate([rows, [i_zero, i_zero-1, j_zero, j_zero-1]])
cols = np.concatenate([cols, [i_zero-1, i_zero, j_zero-1, j_zero]])
vals = np.concatenate([vals, [1., 1., 1., 1.]])
mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols,
(n*m + 2, n*m + 2))
mat.compress_csc()
mat_dense = mat.to_dense()
# Testing a sparse solve for all 50 basis vectors
for itest in range(n*m + 2):
b = np.zeros(n*m + 2, dtype=np.float64)
b[itest] = 1.
x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.ones(n*m + 2),
tol=1.e-10)
assert_array_almost_equal(np.dot(mat_dense, x), b)
# 3) Now a simple test that summation of duplicate (i.e. with same rows,
# same cols) entries occurs when compressed.
vals = np.ones(17, dtype=np.float64)
rows = np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1],
dtype=np.int32)
cols = np.array([0, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
dtype=np.int32)
dim = (3, 3)
mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols, dim)
mat.compress_csc()
mat_dense = mat.to_dense()
assert_array_almost_equal(mat_dense, np.array([
[1., 2., 0.], [2., 1., 5.], [0., 5., 1.]], dtype=np.float64))
def test_triinterpcubic_geom_weights():
# Tests to check computation of weights for _DOF_estimator_geom:
# The weight sum per triangle can be 1. (in case all angles < 90 degrees)
# or (2*w_i) where w_i = 1-alpha_i/np.pi is the weight of apex i ; alpha_i
# is the apex angle > 90 degrees.
(ax, ay) = (0., 1.687)
x = np.array([ax, 0.5*ax, 0., 1.])
y = np.array([ay, -ay, 0., 0.])
z = np.zeros(4, dtype=np.float64)
triangles = [[0, 2, 3], [1, 3, 2]]
sum_w = np.zeros([4, 2]) # 4 possibilities ; 2 triangles
for theta in np.linspace(0., 2*np.pi, 14): # rotating the figure...
x_rot = np.cos(theta)*x + np.sin(theta)*y
y_rot = -np.sin(theta)*x + np.cos(theta)*y
triang = mtri.Triangulation(x_rot, y_rot, triangles)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
dof_estimator = mtri.triinterpolate._DOF_estimator_geom(cubic_geom)
weights = dof_estimator.compute_geom_weights()
# Testing for the 4 possibilities...
sum_w[0, :] = np.sum(weights, 1) - 1
for itri in range(3):
sum_w[itri+1, :] = np.sum(weights, 1) - 2*weights[:, itri]
assert_array_almost_equal(np.min(np.abs(sum_w), axis=0),
np.array([0., 0.], dtype=np.float64))
def test_triinterp_colinear():
# Tests interpolating inside a triangulation with horizontal colinear
# points (refer also to the tests :func:`test_trifinder` ).
#
# These are not valid triangulations, but we try to deal with the
# simplest violations (i.e. those handled by the default TriFinder).
#
# Note that the LinearTriInterpolator and the CubicTriInterpolator with
# kind='min_E' or 'geom' still pass a linear patch test.
# We also test interpolation inside a flat triangle, by forcing
# *tri_index* in a call to :meth:`_interpolate_multikeys`.
delta = 0. # If positive, the triangulation is valid; if negative, it is
# invalid; if zero, there are colinear points but the tests should still pass.
x0 = np.array([1.5, 0, 1, 2, 3, 1.5, 1.5])
y0 = np.array([-1, 0, 0, 0, 0, delta, 1])
# We test different affine transformations of the initial figure ; to
# avoid issues related to round-off errors we only use integer
# coefficients (otherwise the Triangulation might become invalid even with
# delta == 0).
transformations = [[1, 0], [0, 1], [1, 1], [1, 2], [-2, -1], [-2, 1]]
for transformation in transformations:
x_rot = transformation[0]*x0 + transformation[1]*y0
y_rot = -transformation[1]*x0 + transformation[0]*y0
(x, y) = (x_rot, y_rot)
z = 1.23*x - 4.79*y
triangles = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [1, 2, 5], [2, 3, 5],
[3, 4, 5], [1, 5, 6], [4, 6, 5]]
triang = mtri.Triangulation(x, y, triangles)
xs = np.linspace(np.min(triang.x), np.max(triang.x), 20)
ys = np.linspace(np.min(triang.y), np.max(triang.y), 20)
xs, ys = np.meshgrid(xs, ys)
xs = xs.ravel()
ys = ys.ravel()
mask_out = (triang.get_trifinder()(xs, ys) == -1)
zs_target = np.ma.array(1.23*xs - 4.79*ys, mask=mask_out)
linear_interp = mtri.LinearTriInterpolator(triang, z)
cubic_min_E = mtri.CubicTriInterpolator(triang, z)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = interp(xs, ys)
assert_array_almost_equal(zs_target, zs)
# Testing interpolation inside the flat triangle number 4: [2, 3, 5]
# by imposing *tri_index* in a call to :meth:`_interpolate_multikeys`
itri = 4
pt1 = triang.triangles[itri, 0]
pt2 = triang.triangles[itri, 1]
xs = np.linspace(triang.x[pt1], triang.x[pt2], 10)
ys = np.linspace(triang.y[pt1], triang.y[pt2], 10)
zs_target = 1.23*xs - 4.79*ys
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs, = interp._interpolate_multikeys(
xs, ys, tri_index=itri*np.ones(10, dtype=np.int32))
assert_array_almost_equal(zs_target, zs)
def test_triinterp_transformations():
# 1) Testing that the interpolation scheme is invariant by rotation of the
# whole figure.
# Note: This test is non-trivial for a CubicTriInterpolator with
# kind='min_E'. It does fail for a non-isotropic stiffness matrix E of
# :class:`_ReducedHCT_Element` (tested with E=np.diag([1., 1., 1.])), and
# provides a good test for :meth:`get_Kff_and_Ff` of the same class.
#
# 2) Also testing that the interpolation scheme is invariant by expansion
# of the whole figure along one axis.
n_angles = 20
n_radii = 10
min_radius = 0.15
def z(x, y):
r1 = np.sqrt((0.5-x)**2 + (0.5-y)**2)
theta1 = np.arctan2(0.5-x, 0.5-y)
r2 = np.sqrt((-x-0.2)**2 + (-y-0.2)**2)
theta2 = np.arctan2(-x-0.2, -y-0.2)
z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) +
(np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) +
0.7*(x**2 + y**2))
return (np.max(z)-z)/(np.max(z)-np.min(z))
# First create the x and y coordinates of the points.
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0 + n_angles, 2*np.pi + n_angles,
n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles
x0 = (radii*np.cos(angles)).flatten()
y0 = (radii*np.sin(angles)).flatten()
triang0 = mtri.Triangulation(x0, y0) # Delaunay triangulation
z0 = z(x0, y0)
# Then create the test points
xs0 = np.linspace(-1., 1., 23)
ys0 = np.linspace(-1., 1., 23)
xs0, ys0 = np.meshgrid(xs0, ys0)
xs0 = xs0.ravel()
ys0 = ys0.ravel()
interp_z0 = {}
for i_angle in range(2):
# Rotating everything
theta = 2*np.pi / n_angles * i_angle
x = np.cos(theta)*x0 + np.sin(theta)*y0
y = -np.sin(theta)*x0 + np.cos(theta)*y0
xs = np.cos(theta)*xs0 + np.sin(theta)*ys0
ys = -np.sin(theta)*xs0 + np.cos(theta)*ys0
triang = mtri.Triangulation(x, y, triang0.triangles)
linear_interp = mtri.LinearTriInterpolator(triang, z0)
cubic_min_E = mtri.CubicTriInterpolator(triang, z0)
cubic_geom = mtri.CubicTriInterpolator(triang, z0, kind='geom')
dic_interp = {'lin': linear_interp,
'min_E': cubic_min_E,
'geom': cubic_geom}
# Testing that the interpolation is invariant by rotation...
for interp_key in ['lin', 'min_E', 'geom']:
interp = dic_interp[interp_key]
if i_angle == 0:
interp_z0[interp_key] = interp(xs0, ys0) # storage
else:
interpz = interp(xs, ys)
matest.assert_array_almost_equal(interpz,
interp_z0[interp_key])
scale_factor = 987654.3210
for scaled_axis in ('x', 'y'):
# Scaling everything (expansion along scaled_axis)
if scaled_axis == 'x':
x = scale_factor * x0
y = y0
xs = scale_factor * xs0
ys = ys0
else:
x = x0
y = scale_factor * y0
xs = xs0
ys = scale_factor * ys0
triang = mtri.Triangulation(x, y, triang0.triangles)
linear_interp = mtri.LinearTriInterpolator(triang, z0)
cubic_min_E = mtri.CubicTriInterpolator(triang, z0)
cubic_geom = mtri.CubicTriInterpolator(triang, z0, kind='geom')
dic_interp = {'lin': linear_interp,
'min_E': cubic_min_E,
'geom': cubic_geom}
# Testing that the interpolation is invariant by expansion along
# 1 axis...
for interp_key in ['lin', 'min_E', 'geom']:
interpz = dic_interp[interp_key](xs, ys)
matest.assert_array_almost_equal(interpz, interp_z0[interp_key])
@image_comparison(baseline_images=['tri_smooth_contouring'],
extensions=['png'], remove_text=True)
def test_tri_smooth_contouring():
# Image comparison based on example tricontour_smooth_user.
n_angles = 20
n_radii = 10
min_radius = 0.15
def z(x, y):
r1 = np.sqrt((0.5-x)**2 + (0.5-y)**2)
theta1 = np.arctan2(0.5-x, 0.5-y)
r2 = np.sqrt((-x-0.2)**2 + (-y-0.2)**2)
theta2 = np.arctan2(-x-0.2, -y-0.2)
z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) +
(np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) +
0.7*(x**2 + y**2))
return (np.max(z)-z)/(np.max(z)-np.min(z))
# First create the x and y coordinates of the points.
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0 + n_angles, 2*np.pi + n_angles,
n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles
x0 = (radii*np.cos(angles)).flatten()
y0 = (radii*np.sin(angles)).flatten()
triang0 = mtri.Triangulation(x0, y0) # Delaunay triangulation
z0 = z(x0, y0)
xmid = x0[triang0.triangles].mean(axis=1)
ymid = y0[triang0.triangles].mean(axis=1)
mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
triang0.set_mask(mask)
# Then the plot
refiner = mtri.UniformTriRefiner(triang0)
tri_refi, z_test_refi = refiner.refine_field(z0, subdiv=4)
levels = np.arange(0., 1., 0.025)
plt.triplot(triang0, lw=0.5, color='0.5')
plt.tricontour(tri_refi, z_test_refi, levels=levels, colors="black")
@image_comparison(baseline_images=['tri_smooth_gradient'],
extensions=['png'], remove_text=True)
def test_tri_smooth_gradient():
# Image comparison based on example trigradient_demo.
def dipole_potential(x, y):
""" An electric dipole potential V """
r_sq = x**2 + y**2
theta = np.arctan2(y, x)
z = np.cos(theta)/r_sq
return (np.max(z)-z) / (np.max(z)-np.min(z))
# Creating a Triangulation
n_angles = 30
n_radii = 10
min_radius = 0.2
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles
x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
V = dipole_potential(x, y)
triang = mtri.Triangulation(x, y)
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
triang.set_mask(mask)
# Refine data - interpolates the electrical potential V
refiner = mtri.UniformTriRefiner(triang)
tri_refi, z_test_refi = refiner.refine_field(V, subdiv=3)
# Computes the electrical field (Ex, Ey) as gradient of -V
tci = mtri.CubicTriInterpolator(triang, -V)
(Ex, Ey) = tci.gradient(triang.x, triang.y)
E_norm = np.sqrt(Ex**2 + Ey**2)
# Plot the triangulation, the potential iso-contours and the vector field
plt.figure()
plt.gca().set_aspect('equal')
plt.triplot(triang, color='0.8')
levels = np.arange(0., 1., 0.01)
cmap = cm.get_cmap(name='hot', lut=None)
plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap,
linewidths=[2.0, 1.0, 1.0, 1.0])
# Plots direction of the electrical vector field
plt.quiver(triang.x, triang.y, Ex/E_norm, Ey/E_norm,
units='xy', scale=10., zorder=3, color='blue',
width=0.007, headwidth=3., headlength=4.)
def test_tritools():
# Tests TriAnalyzer.scale_factors on masked triangulation
# Tests circle_ratios on equilateral and right-angled triangle.
x = np.array([0., 1., 0.5, 0., 2.])
y = np.array([0., 0., 0.5*np.sqrt(3.), -1., 1.])
triangles = np.array([[0, 1, 2], [0, 1, 3], [1, 2, 4]], dtype=np.int32)
mask = np.array([False, False, True], dtype=np.bool)
triang = mtri.Triangulation(x, y, triangles, mask=mask)
analyser = mtri.TriAnalyzer(triang)
assert_array_almost_equal(analyser.scale_factors,
np.array([1., 1./(1.+0.5*np.sqrt(3.))]))
assert_array_almost_equal(
analyser.circle_ratios(rescale=False),
np.ma.masked_array([0.5, 1./(1.+np.sqrt(2.)), np.nan], mask))
# Tests circle ratio of a flat triangle
x = np.array([0., 1., 2.])
y = np.array([1., 1.+3., 1.+6.])
triangles = np.array([[0, 1, 2]], dtype=np.int32)
triang = mtri.Triangulation(x, y, triangles)
analyser = mtri.TriAnalyzer(triang)
assert_array_almost_equal(analyser.circle_ratios(), np.array([0.]))
# Tests TriAnalyzer.get_flat_tri_mask
# Creates a triangulation of [-1, 1] x [-1, 1] with contiguous groups of
# 'flat' triangles at the 4 corners and at the center. Checks that only
# those at the borders are eliminated by TriAnalyzer.get_flat_tri_mask
n = 9
def power(x, a):
return np.abs(x)**a*np.sign(x)
x = np.linspace(-1., 1., n+1)
x, y = np.meshgrid(power(x, 2.), power(x, 0.25))
x = x.ravel()
y = y.ravel()
triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1))
analyser = mtri.TriAnalyzer(triang)
mask_flat = analyser.get_flat_tri_mask(0.2)
verif_mask = np.zeros(162, dtype=np.bool)
corners_index = [0, 1, 2, 3, 14, 15, 16, 17, 18, 19, 34, 35, 126, 127,
142, 143, 144, 145, 146, 147, 158, 159, 160, 161]
verif_mask[corners_index] = True
assert_array_equal(mask_flat, verif_mask)
# Now including a hole (masked triangle) at the center. The center also
# shall be eliminated by get_flat_tri_mask.
mask = np.zeros(162, dtype=np.bool)
mask[80] = True
triang.set_mask(mask)
mask_flat = analyser.get_flat_tri_mask(0.2)
center_index = [44, 45, 62, 63, 78, 79, 80, 81, 82, 83, 98, 99, 116, 117]
verif_mask[center_index] = True
assert_array_equal(mask_flat, verif_mask)
def test_trirefine():
# Testing subdiv=2 refinement
n = 3
subdiv = 2
x = np.linspace(-1., 1., n+1)
x, y = np.meshgrid(x, x)
x = x.ravel()
y = y.ravel()
mask = np.zeros(2*n**2, dtype=np.bool)
mask[n**2:] = True
triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1),
mask=mask)
refiner = mtri.UniformTriRefiner(triang)
refi_triang = refiner.refine_triangulation(subdiv=subdiv)
x_refi = refi_triang.x
y_refi = refi_triang.y
n_refi = n * subdiv**2
x_verif = np.linspace(-1., 1., n_refi+1)
x_verif, y_verif = np.meshgrid(x_verif, x_verif)
x_verif = x_verif.ravel()
y_verif = y_verif.ravel()
ind1d = np.in1d(np.around(x_verif*(2.5+y_verif), 8),
np.around(x_refi*(2.5+y_refi), 8))
assert_array_equal(ind1d, True)
# Testing the mask of the refined triangulation
refi_mask = refi_triang.mask
refi_tri_barycenter_x = np.sum(refi_triang.x[refi_triang.triangles],
axis=1) / 3.
refi_tri_barycenter_y = np.sum(refi_triang.y[refi_triang.triangles],
axis=1) / 3.
tri_finder = triang.get_trifinder()
refi_tri_indices = tri_finder(refi_tri_barycenter_x,
refi_tri_barycenter_y)
refi_tri_mask = triang.mask[refi_tri_indices]
assert_array_equal(refi_mask, refi_tri_mask)
# Testing that the numbering of triangles does not change the
# interpolation result.
x = np.asarray([0.0, 1.0, 0.0, 1.0])
y = np.asarray([0.0, 0.0, 1.0, 1.0])
triang = [mtri.Triangulation(x, y, [[0, 1, 3], [3, 2, 0]]),
mtri.Triangulation(x, y, [[0, 1, 3], [2, 0, 3]])]
z = np.sqrt((x-0.3)*(x-0.3) + (y-0.4)*(y-0.4))
# Refining the 2 triangulations and reordering the points
xyz_data = []
for i in range(2):
refiner = mtri.UniformTriRefiner(triang[i])
refined_triang, refined_z = refiner.refine_field(z, subdiv=1)
xyz = np.dstack((refined_triang.x, refined_triang.y, refined_z))[0]
xyz = xyz[np.lexsort((xyz[:, 1], xyz[:, 0]))]
xyz_data += [xyz]
assert_array_almost_equal(xyz_data[0], xyz_data[1])
def meshgrid_triangles(n):
"""
Utility function.
Returns triangles to mesh a np.meshgrid of n x n points
"""
tri = []
for i in range(n-1):
for j in range(n-1):
a = i + j*(n)
b = (i+1) + j*n
c = i + (j+1)*n
d = (i+1) + (j+1)*n
tri += [[a, b, d], [a, d, c]]
return np.array(tri, dtype=np.int32)
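# A minimal usage sketch (this extra test is an assumption added for
# illustration; it is not part of the original test suite). It shows the
# output of meshgrid_triangles: each cell of the (n-1) x (n-1) cell grid is
# split into two triangles of point indices.
def test_meshgrid_triangles_example():
    # A 2 x 2 grid of points has a single cell, hence two triangles.
    assert_array_equal(meshgrid_triangles(2),
                       np.array([[0, 1, 3], [0, 3, 2]], dtype=np.int32))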
def test_triplot_return():
# Check that triplot returns the artists it adds
from matplotlib.figure import Figure
ax = Figure().add_axes([0.1, 0.1, 0.7, 0.7])
triang = mtri.Triangulation(
[0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0],
triangles=[[0, 1, 3], [3, 2, 0]])
if ax.triplot(triang, "b-") is None:
raise AssertionError("triplot should return the artist it adds")
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
shangwuhencc/scikit-learn | examples/classification/plot_lda_qda.py | 78 | 5046 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
oew1v07/scikit-image | skimage/io/_plugins/matplotlib_plugin.py | 16 | 4961 | from collections import namedtuple
import numpy as np
import warnings
import matplotlib.pyplot as plt
from ...util import dtype as dtypes
from ...exposure import is_low_contrast
from ...util.colormap import viridis
_default_colormap = 'gray'
_nonstandard_colormap = viridis
_diverging_colormap = 'RdBu'
ImageProperties = namedtuple('ImageProperties',
['signed', 'out_of_range_float',
'low_dynamic_range', 'unsupported_dtype'])
def _get_image_properties(image):
"""Determine nonstandard properties of an input image.
Parameters
----------
image : array
The input image.
Returns
-------
ip : ImageProperties named tuple
The properties of the image:
- signed: whether the image has negative values.
- out_of_range_float: if the image has floating point data
outside of [-1, 1].
- low_dynamic_range: if the image is in the standard image
range (e.g. [0, 1] for a floating point image) but its
dynamic range would be too small to display with standard
image ranges.
- unsupported_dtype: if the image data type is not a
standard skimage type, e.g. ``numpy.uint64``.
"""
immin, immax = np.min(image), np.max(image)
imtype = image.dtype.type
try:
lo, hi = dtypes.dtype_range[imtype]
except KeyError:
lo, hi = immin, immax
signed = immin < 0
out_of_range_float = (np.issubdtype(image.dtype, np.float) and
(immin < lo or immax > hi))
low_dynamic_range = (immin != immax and
is_low_contrast(image))
unsupported_dtype = image.dtype not in dtypes._supported_types
return ImageProperties(signed, out_of_range_float,
low_dynamic_range, unsupported_dtype)
def _raise_warnings(image_properties):
"""Raise the appropriate warning for each nonstandard image type.
Parameters
----------
image_properties : ImageProperties named tuple
The properties of the considered image.
"""
ip = image_properties
if ip.unsupported_dtype:
warnings.warn("Non-standard image type; displaying image with "
"stretched contrast.")
if ip.low_dynamic_range:
warnings.warn("Low image dynamic range; displaying image with "
"stretched contrast.")
if ip.out_of_range_float:
warnings.warn("Float image out of standard range; displaying "
"image with stretched contrast.")
def _get_display_range(image):
"""Return the display range for a given set of image properties.
Parameters
----------
image : array
The input image.
Returns
-------
lo, hi : same type as immin, immax
The display range to be used for the input image.
cmap : string
The name of the colormap to use.
"""
ip = _get_image_properties(image)
immin, immax = np.min(image), np.max(image)
if ip.signed:
magnitude = max(abs(immin), abs(immax))
lo, hi = -magnitude, magnitude
cmap = _diverging_colormap
elif any(ip):
_raise_warnings(ip)
lo, hi = immin, immax
cmap = _nonstandard_colormap
else:
lo = 0
imtype = image.dtype.type
hi = dtypes.dtype_range[imtype][1]
cmap = _default_colormap
return lo, hi, cmap
def imshow(im, *args, **kwargs):
"""Show the input image and return the current axes.
By default, the image is displayed in greyscale, rather than
the matplotlib default colormap.
Images are assumed to have standard range for their type. For
example, if a floating point image has values in [0, 0.5], the
most intense color will be gray50, not white.
If the image exceeds the standard range, or if the range is too
small to display, we fall back on displaying exactly the range of
the input image, along with a colorbar to clearly indicate that
this range transformation has occurred.
For signed images, we use a diverging colormap centered at 0.
Parameters
----------
im : array, shape (M, N[, 3])
The image to display.
*args, **kwargs : positional and keyword arguments
These are passed directly to `matplotlib.pyplot.imshow`.
Returns
-------
ax_im : `matplotlib.pyplot.AxesImage`
The `AxesImage` object returned by `plt.imshow`.
"""
if kwargs.get('cmap', None) == 'viridis':
kwargs['cmap'] = viridis
lo, hi, cmap = _get_display_range(im)
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', cmap)
kwargs.setdefault('vmin', lo)
kwargs.setdefault('vmax', hi)
ax_im = plt.imshow(im, *args, **kwargs)
if cmap != _default_colormap:
plt.colorbar()
return ax_im
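# Hedged usage sketch -- the helper below is an illustrative assumption and is
# not part of the skimage plugin API. It exercises the behaviour documented
# above: a signed image is shown with a diverging colormap and, because the
# colormap is non-default, a colorbar is added.
def _demo_imshow_signed():
    rng = np.random.RandomState(0)
    im = rng.randn(64, 64)  # signed float image -> diverging colormap
    ax_im = imshow(im)
    plt.show()
    return ax_im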
imread = plt.imread
show = plt.show
def _app_show():
show()
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/SoftContact_ElPPlShear/Area/Normalized_Shear_Stress_Plot.py | 6 | 6112 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
plt.style.use('grayscale')
###############################################################
## Area = 1*m^2
###############################################################
# Go over each feioutput and plot each one.
thefile = "A_1/Analytical_Solution_Shear.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(shear_strain*5, shear_stress/normal_stress, label=r'Area = $1 m^2$', linewidth=4)
plt.xlabel(r"Shear Displacement $\Delta_t [mm]$")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
###############################################################
## Area = 1e^2 m^2
###############################################################
# Go over each feioutput and plot each one.
thefile = "A_1e2/Analytical_Solution_Shear.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(shear_strain*5, shear_stress/normal_stress, label=r'Area = $1e^2 m^2$', linewidth=4)
plt.xlabel(r"Shear Displacement $\Delta_t [mm]$")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
###############################################################
## Area = 1e^-2 m^2
###############################################################
# Go over each feioutput and plot each one.
thefile = "A_1e-2/Analytical_Solution_Shear.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(shear_strain*5, shear_stress/normal_stress, label=r'Area = $1e^{-2} m^2$', linewidth=4)
plt.xlabel(r"Shear Displacement $\Delta_t [mm]$")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
###############################################################
## Area = 1e^-4 m^2
###############################################################
# Go over each feioutput and plot each one.
thefile = "A_1e-4/Analytical_Solution_Shear.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(shear_strain*5, shear_stress/normal_stress, label=r'Area = $1e^{-4} m^2$', linewidth=4)
plt.xlabel(r"Shear Displacement $\Delta_t [mm]$")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
########################################################
# # axes = plt.gca()
# # axes.set_xlim([-7,7])
# # axes.set_ylim([-1,1])
outfigname = "Normalized_Shear_Stress.pdf";
legend = plt.legend()
legend.get_frame().set_linewidth(0.0)
legend.get_frame().set_facecolor('none')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
MechCoder/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 86 | 4092 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
ctralie/SlidingWindowVideoTDA | CSMSSMTools.py | 1 | 4831 | """
Programmer: Chris Tralie, 12/2016 ([email protected])
Purpose: To provide tools for quickly computing all pairs self-similarity
and cross-similarity matrices
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc
from scipy import sparse # needed by CSMToBinary below
def getSSM(X, DPixels, doPlot = False):
"""
Compute a Euclidean self-similarity matrix between a set of points
:param X: An Nxd matrix holding the d coordinates of N points
:param DPixels: The dimension to which the SSM image will be resized
:param doPlot: If true, show a plot comparing the original/resized images
:return: A tuple (D, DResized)
"""
D = np.sum(X**2, 1)[:, None]
D = D + D.T - 2*X.dot(X.T)
D[D < 0] = 0
D = 0.5*(D + D.T)
D = np.sqrt(D)
if doPlot:
plt.subplot(121)
plt.imshow(D, interpolation = 'nearest', cmap = 'afmhot')
plt.subplot(122)
plt.imshow(scipy.misc.imresize(D, (DPixels, DPixels)), interpolation = 'nearest', cmap = 'afmhot')
plt.show()
if not (D.shape[0] == DPixels):
return (D, scipy.misc.imresize(D, (DPixels, DPixels)))
return (D, D)
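# A minimal usage sketch (the _demo_getSSM helper and its synthetic input are
# assumptions added for illustration; they are not part of the original
# module): build the SSM of points sampled on the unit circle.
def _demo_getSSM():
    t = np.linspace(0, 2*np.pi, 100, endpoint=False)
    X = np.column_stack((np.cos(t), np.sin(t)))  # 100 points on the unit circle
    D, DResized = getSSM(X, DPixels=100)         # DPixels == N, so no resizing
    return D, DResized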
def getSSMAltMetric(X, A, DPixels, doPlot = False):
"""
Compute a self-similarity matrix under an alternative metric specified
by the symmetric positive definite matrix A^TA, so that the squared
Euclidean distance under this metric between two vectors x and y is
(x-y)^T*A^T*A*(x-y)
:param X: An Nxd matrix holding the d coordinates of N points
:param DPixels: The dimension to which the SSM image will be resized
:param doPlot: If true, show a plot comparing the original/resized images
:return: A tuple (D, DResized)
"""
X2 = X.dot(A.T)
return getSSM(X2, DPixels, doPlot)
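# A minimal usage sketch (the _demo_getSSMAltMetric helper and the choice of
# A are assumptions for illustration; they are not part of the original
# module): compute distances under the metric A^T*A with A = diag(3, 1),
# i.e. the x coordinate is weighted three times more than y.
def _demo_getSSMAltMetric():
    t = np.linspace(0, 2*np.pi, 100, endpoint=False)
    X = np.column_stack((np.cos(t), np.sin(t)))
    A = np.diag([3.0, 1.0])
    D, _ = getSSMAltMetric(X, A, DPixels=100)
    return D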
#############################################################################
## Code for dealing with cross-similarity matrices
#############################################################################
def getCSM(X, Y):
"""
Return the Euclidean cross-similarity matrix between the M points
in the Mxd matrix X and the N points in the Nxd matrix Y.
:param X: An Mxd matrix holding the coordinates of M points
:param Y: An Nxd matrix holding the coordinates of N points
:return D: An MxN Euclidean cross-similarity matrix
"""
C = np.sum(X**2, 1)[:, None] + np.sum(Y**2, 1)[None, :] - 2*X.dot(Y.T)
C[C < 0] = 0
return np.sqrt(C)
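# A minimal usage sketch (the _demo_getCSM helper and its random inputs are
# assumptions for illustration; they are not part of the original module):
# cross-distances between two small random point clouds.
def _demo_getCSM():
    np.random.seed(0)
    X = np.random.rand(20, 3)  # 20 points in R^3
    Y = np.random.rand(30, 3)  # 30 points in R^3
    C = getCSM(X, Y)           # C has shape (20, 30)
    return C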
def getCSMEMD1D(X, Y):
"""
An approximation of all pairs Earth Mover's 1D Distance
"""
M = X.shape[0]
N = Y.shape[0]
K = X.shape[1]
XC = np.cumsum(X, 1)
YC = np.cumsum(Y, 1)
D = np.zeros((M, N))
for k in range(K):
xc = XC[:, k]
yc = YC[:, k]
D += np.abs(xc[:, None] - yc[None, :])
return D
def getCSMCosine(X, Y):
XNorm = np.sqrt(np.sum(X**2, 1))
XNorm[XNorm == 0] = 1
YNorm = np.sqrt(np.sum(Y**2, 1))
YNorm[YNorm == 0] = 1
D = (X/XNorm[:, None]).dot((Y/YNorm[:, None]).T)
D = 1 - D # Identical directions give distance 0; opposite directions give distance 2
return D
def CSMToBinary(D, Kappa):
"""
Turn a cross-similarity matrix into a binary cross-similarity matrix
If Kappa = 0, take all neighbors
If Kappa < 1 it is the fraction of mutual neighbors to consider
Otherwise Kappa is the number of mutual neighbors to consider
"""
N = D.shape[0]
M = D.shape[1]
if Kappa == 0:
return np.ones((N, M))
elif Kappa < 1:
NNeighbs = int(np.round(Kappa*M))
else:
NNeighbs = Kappa
J = np.argpartition(D, NNeighbs, 1)[:, 0:NNeighbs]
I = np.tile(np.arange(N)[:, None], (1, NNeighbs))
V = np.ones(I.size)
[I, J] = [I.flatten(), J.flatten()]
ret = sparse.coo_matrix((V, (I, J)), shape=(N, M))
return ret.toarray()
def CSMToBinaryMutual(D, Kappa):
"""
Take the binary AND between the nearest neighbors in one direction
and the other
"""
B1 = CSMToBinary(D, Kappa)
B2 = CSMToBinary(D.T, Kappa).T
return B1*B2
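# A minimal usage sketch (the _demo_CSMToBinaryMutual helper and its random
# inputs are assumptions for illustration; they are not part of the original
# module): with Kappa = 0.1, each point keeps the closest 10% of points from
# the other set, and the two directions are ANDed to get mutual neighbors.
def _demo_CSMToBinaryMutual():
    np.random.seed(0)
    X = np.random.rand(40, 2)
    Y = np.random.rand(60, 2)
    D = getCSM(X, Y)
    B = CSMToBinaryMutual(D, Kappa=0.1)  # binary 40 x 60 matrix
    return B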
def getW(D, K, Mu = 0.5):
"""
Return affinity matrix
[1] Wang, Bo, et al. "Similarity network fusion for aggregating data types on a genomic scale."
Nature methods 11.3 (2014): 333-337.
:param D: Self-similarity matrix
:param K: Number of nearest neighbors
"""
#W(i, j) = exp(-Dij^2/(mu*epsij))
DSym = 0.5*(D + D.T)
np.fill_diagonal(DSym, 0)
Neighbs = np.partition(DSym, K+1, 1)[:, 0:K+1]
MeanDist = np.mean(Neighbs, 1)*float(K+1)/float(K) #Need this scaling
#to exclude diagonal element in mean
#Equation 1 in SNF paper [1] for estimating local neighborhood radii
#by looking at k nearest neighbors, not including point itself
Eps = MeanDist[:, None] + MeanDist[None, :] + DSym
Eps = Eps/3
W = np.exp(-DSym**2/(2*(Mu*Eps)**2))
return W
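# A minimal usage sketch (the _demo_getW helper and its random input are
# assumptions for illustration; they are not part of the original module):
# turn a Euclidean SSM into an SNF-style affinity matrix, with K = 10 nearest
# neighbors used to estimate the local scales Eps.
def _demo_getW():
    np.random.seed(0)
    X = np.random.rand(100, 5)
    D, _ = getSSM(X, DPixels=100)  # Euclidean self-similarity matrix
    W = getW(D, K=10)              # affinities in (0, 1]; larger = more similar
    return W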
| apache-2.0 |
duncanwp/iris | docs/iris/src/userguide/regridding_plots/interpolate_column.py | 5 | 2068 |
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import iris
import iris.quickplot as qplt
import iris.analysis
import matplotlib.pyplot as plt
import numpy as np
fname = iris.sample_data_path('hybrid_height.nc')
column = iris.load_cube(fname, 'air_potential_temperature')[:, 0, 0]
alt_coord = column.coord('altitude')
# Interpolate the "perfect" linear interpolation. Really this is just
# a high number of interpolation points, in this case 1000 of them.
altitude_points = [('altitude', np.linspace(400, 1250, 1000))]
scheme = iris.analysis.Linear(extrapolation_mode='mask')
linear_column = column.interpolate(altitude_points, scheme)
# Now interpolate the data onto 10 evenly spaced altitude levels,
# as we did in the example.
altitude_points = [('altitude', np.linspace(400, 1250, 10))]
scheme = iris.analysis.Linear()
new_column = column.interpolate(altitude_points, scheme)
plt.figure(figsize=(5, 4), dpi=100)
# Plot the black markers for the original data.
qplt.plot(column, column.coord('altitude'),
marker='o', color='black', linestyle='', markersize=3,
label='Original values', zorder=2)
# Plot the gray line to display the linear interpolation.
qplt.plot(linear_column, linear_column.coord('altitude'),
color='gray',
label='Linear interpolation', zorder=0)
# Plot the red markers for the new data.
qplt.plot(new_column, new_column.coord('altitude'),
marker='D', color='red', linestyle='',
label='Interpolated values', zorder=1)
ax = plt.gca()
# Space the plot such that the labels appear correctly.
plt.subplots_adjust(left=0.17, bottom=0.14)
# Limit the plot to a maximum of 5 ticks.
ax.xaxis.get_major_locator().set_params(nbins=5)
# Prevent matplotlib from using "offset" notation on the xaxis.
ax.xaxis.get_major_formatter().set_useOffset(False)
# Put some space between the line and the axes.
ax.margins(0.05)
# Place gridlines and a legend.
ax.grid()
plt.legend(loc='lower right')
plt.show()
| lgpl-3.0 |
amueller/odsc-masterclass-2017-morning | notebooks/solutions/linear_models_diabetes.py | 1 | 3780 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso, Ridge, LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.datasets import load_diabetes
diabetes = load_diabetes()
# create dataframe for easy boxplot
df = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
df.boxplot()
plt.figure()
plt.title("Target distribution")
plt.hist(diabetes.target, bins="auto")
X_train, X_test, y_train, y_test = train_test_split(diabetes.data,
diabetes.target)
scores_lr = cross_val_score(LinearRegression(), X_train, y_train, cv=10)
print("Linear regression score: {}".format(scores_lr.mean()))
scores_ridge = cross_val_score(Ridge(), X_train, y_train, cv=10)
print("Ridge Regression score: {}".format(scores_ridge.mean()))
# With scaled data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
scores_lr = cross_val_score(LinearRegression(), X_train_scaled, y_train, cv=10)
print("Linear regression w/ scaling: {}".format(scores_lr.mean()))
scores_ridge = cross_val_score(Ridge(), X_train_scaled, y_train, cv=10)
print("Ridge regression w/ scaling: {}".format(scores_ridge.mean()))
from sklearn.model_selection import GridSearchCV
param_grid = {'alpha': np.logspace(-3, 3, 7)}
grid = GridSearchCV(Ridge(), param_grid, cv=10, return_train_score=True)
grid.fit(X_train_scaled, y_train)
res = pd.DataFrame(grid.cv_results_)
res.plot("param_alpha", ["mean_train_score", "mean_test_score"], logx=True)
plt.title("Ridge grid search")
print(grid.best_params_, grid.best_score_)
lr = LinearRegression().fit(X_train_scaled, y_train)
plt.figure()
plt.title("Coefficients LR vs Ridge")
plt.hlines(0, 0, X_train.shape[1], linewidth=.5)
plt.plot(grid.best_estimator_.coef_, 'o', label="Ridge({})".format(grid.best_params_['alpha']))
plt.plot(lr.coef_, 'o', label="LR", alpha=.6)
plt.legend()
from sklearn.model_selection import GridSearchCV
param_grid = {'alpha': np.logspace(-3, 3, 7)}
grid_lasso = GridSearchCV(Lasso(), param_grid, cv=10, return_train_score=True)
grid_lasso.fit(X_train_scaled, y_train)
res = pd.DataFrame(grid_lasso.cv_results_)
res.plot("param_alpha", ["mean_train_score", "mean_test_score"], logx=True)
plt.title("Lasso grid search")
print(grid_lasso.best_params_, grid_lasso.best_score_)
plt.figure()
plt.title("coefficients")
plt.hlines(0, 0, X_train.shape[1], linewidth=.5)
plt.plot(grid.best_estimator_.coef_, 'o', label="Ridge({})".format(grid.best_params_['alpha']))
plt.plot(grid_lasso.best_estimator_.coef_, 'o', label="Lasso({})".format(grid_lasso.best_params_['alpha']))
plt.plot(lr.coef_, 'o', label="LR", alpha=.6)
plt.legend()
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(include_bias=False)
X_train_poly = poly.fit_transform(X_train_scaled)
X_test_poly = poly.transform(X_test_scaled)
scores_lr = cross_val_score(LinearRegression(), X_train_poly, y_train, cv=10)
print("Linear regression poly features: {}".format(scores_lr.mean()))
scores_ridge = cross_val_score(Ridge(), X_train_poly, y_train, cv=10)
print("Ridge regression poly features: {}".format(scores_ridge.mean()))
from sklearn.model_selection import GridSearchCV
param_grid = {'alpha': np.logspace(-3, 3, 7)}
grid = GridSearchCV(Ridge(), param_grid, cv=10, return_train_score=True)
grid.fit(X_train_poly, y_train)
res = pd.DataFrame(grid.cv_results_)
res.plot("param_alpha", ["mean_train_score", "mean_test_score"], logx=True)
plt.title("Ridge grid search with polynomial features")
print(grid.best_params_, grid.best_score_)
# score with polynomial features is worse! | mit |
JeanKossaifi/scikit-learn | sklearn/preprocessing/tests/test_label.py | 156 | 17626 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
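# Round-trip check: label_binarize and LabelBinarizer must both produce the
# expected indicator matrix (dense and sparse output) and invert back to y.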
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
bhermansyah/DRR-datacenter | geodb/geoapi.py | 1 | 161284 | from geodb.models import AfgFldzonea100KRiskLandcoverPop, FloodRiskExposure, AfgLndcrva, LandcoverDescription, AfgAvsa, AfgAdmbndaAdm1, AfgPplp, earthquake_shakemap, earthquake_events, villagesummaryEQ, AfgRdsl, AfgHltfac, forecastedLastUpdate, provincesummary, districtsummary, AfgCaptAdm1ItsProvcImmap, AfgCaptAdm1NearestProvcImmap, AfgCaptAdm2NearestDistrictcImmap, AfgCaptAirdrmImmap, AfgCaptHltfacTier1Immap, AfgCaptHltfacTier2Immap, tempCurrentSC, AfgCaptHltfacTier3Immap, AfgCaptHltfacTierallImmap, AfgIncidentOasis, AfgCapaGsmcvr, AfgAirdrmp, OasisSettlements
import json
import time, datetime
from tastypie.resources import ModelResource, Resource
from tastypie.serializers import Serializer
from tastypie import fields
from tastypie.constants import ALL
from django.db.models import Count, Sum
from django.core.serializers.json import DjangoJSONEncoder
from tastypie.authorization import DjangoAuthorization
from urlparse import urlparse
from geonode.maps.models import Map
from geonode.maps.views import _resolve_map, _PERMISSION_MSG_VIEW
from django.db import connection, connections
from itertools import *
# added by boedy
from matrix.models import matrix
from tastypie.cache import SimpleCache
from pytz import timezone, all_timezones
from django.http import HttpResponse
from djgeojson.serializers import Serializer as GeoJSONSerializer
from geodb.riverflood import getFloodForecastBySource
import timeago
from fuzzywuzzy import process, fuzz
def query_to_dicts(cursor, query_string, *query_args):
"""Run a simple query and produce a generator
that returns the results as a bunch of dictionaries
with keys for the column values selected.
"""
cursor.execute(query_string, query_args)
col_names = [desc[0] for desc in cursor.description]
while True:
row = cursor.fetchone()
if row is None:
break
row_dict = dict(izip(col_names, row))
yield row_dict
return
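# Usage sketch (illustrative only; the table and column names below are
# hypothetical):
#   cursor = connections['geodb'].cursor()
#   for rec in query_to_dicts(cursor, "select prov_code, prov_name from some_table where prov_code = %s", 1):
#       print rec['prov_name']
#   cursor.close()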
FILTER_TYPES = {
'flood': AfgFldzonea100KRiskLandcoverPop
}
class FloodRiskStatisticResource(ModelResource):
"""Flood api"""
class Meta:
# authorization = DjangoAuthorization()
resource_name = 'floodrisk'
allowed_methods = ['post']
detail_allowed_methods = ['post']
cache = SimpleCache()
# always_return_data = True
def getRisk(self, request):
# save a user-tracking record for the map referenced by the HTTP referer
o = urlparse(request.META.get('HTTP_REFERER')).path
o = o.split('/')
if 'v2' in o:
mapCode = o[3]
else:
mapCode = o[2]
map_obj = _resolve_map(request, mapCode, 'base.view_resourcebase', _PERMISSION_MSG_VIEW)
queryset = matrix(user=request.user,resourceid=map_obj,action='Interactive Calculation')
queryset.save()
boundaryFilter = json.loads(request.body)
bring = None
temp1 = []
for i in boundaryFilter['spatialfilter']:
temp1.append('ST_GeomFromText(\''+i+'\',4326)')
bring = i
temp2 = 'ARRAY['
first=True
for i in temp1:
if first:
temp2 = temp2 + i
first=False
else :
temp2 = temp2 + ', ' + i
temp2 = temp2+']'
filterLock = 'ST_Union('+temp2+')'
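# e.g. for two drawn polygons the resulting filter expression has the form
# (WKT bodies elided):
#   ST_Union(ARRAY[ST_GeomFromText('POLYGON((...))',4326), ST_GeomFromText('POLYGON((...))',4326)])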
yy = None
mm = None
dd = None
if 'date' in boundaryFilter:
tempDate = boundaryFilter['date'].split("-")
dateSent = datetime.datetime(int(tempDate[0]), int(tempDate[1]), int(tempDate[2]))
if (datetime.datetime.today() - dateSent).days == 0:
yy = None
mm = None
dd = None
else:
yy = tempDate[0]
mm = tempDate[1]
dd = tempDate[2]
response = getRiskExecuteExternal(filterLock,boundaryFilter['flag'],boundaryFilter['code'], yy, mm, dd, boundaryFilter['rf_type'], bring)
return response
def post_list(self, request, **kwargs):
self.method_check(request, allowed=['post'])
response = self.getRisk(request)
return self.create_response(request, response)
def getRiskExecuteExternal(filterLock, flag, code, yy=None, mm=None, dd=None, rf_type=None, bring=None):
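"""Build the flood/avalanche risk and forecast statistics dictionary for the
area selected by filterLock (entire Afghanistan, a province/district, a drawn
area or a basin), optionally for a historical forecast date (yy, mm, dd)."""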
date_params = False
if yy and mm and dd:
date_params = True
YEAR = yy
MONTH = mm
DAY = dd
else:
YEAR = datetime.datetime.utcnow().strftime("%Y")
MONTH = datetime.datetime.utcnow().strftime("%m")
DAY = datetime.datetime.utcnow().strftime("%d")
targetRiskIncludeWater = AfgFldzonea100KRiskLandcoverPop.objects.all()
targetRisk = targetRiskIncludeWater.exclude(agg_simplified_description='Water body and Marshland')
targetBase = AfgLndcrva.objects.all()
targetAvalanche = AfgAvsa.objects.all()
response = {}
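# Ad-hoc extents (drawn area, basin) and historical dates are computed on the
# fly below; for the whole country or a province/district on the current date,
# the precomputed province/district summary tables are used instead.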
if flag not in ['entireAfg','currentProvince'] or date_params:
# Avalanche Risk
counts = getRiskNumber(targetAvalanche, filterLock, 'avalanche_cat', 'avalanche_pop', 'sum_area_sqm', 'area_buildings', flag, code, None)
# pop at risk level
temp = dict([(c['avalanche_cat'], c['count']) for c in counts])
response['high_ava_population']=round(temp.get('High', 0) or 0,0)
response['med_ava_population']=round(temp.get('Moderate', 0) or 0,0)
response['low_ava_population']=0
response['total_ava_population']=response['high_ava_population']+response['med_ava_population']+response['low_ava_population']
# area at risk level
temp = dict([(c['avalanche_cat'], c['areaatrisk']) for c in counts])
response['high_ava_area']=round((temp.get('High', 0) or 0)/1000000,1)
response['med_ava_area']=round((temp.get('Moderate', 0) or 0)/1000000,1)
response['low_ava_area']=0
response['total_ava_area']=round(response['high_ava_area']+response['med_ava_area']+response['low_ava_area'],2)
# Number of buildings at avalanche risk
temp = dict([(c['avalanche_cat'], c['houseatrisk']) for c in counts])
response['high_ava_buildings']=temp.get('High', 0) or 0
response['med_ava_buildings']=temp.get('Moderate', 0) or 0
response['total_ava_buildings'] = response['high_ava_buildings']+response['med_ava_buildings']
# Flood Risk
counts = getRiskNumber(targetRisk.exclude(mitigated_pop__gt=0), filterLock, 'deeperthan', 'fldarea_population', 'fldarea_sqm', 'area_buildings', flag, code, None)
# pop at risk level
temp = dict([(c['deeperthan'], c['count']) for c in counts])
response['high_risk_population']=round((temp.get('271 cm', 0) or 0),0)
response['med_risk_population']=round((temp.get('121 cm', 0) or 0), 0)
response['low_risk_population']=round((temp.get('029 cm', 0) or 0),0)
response['total_risk_population']=response['high_risk_population']+response['med_risk_population']+response['low_risk_population']
# area at risk level
temp = dict([(c['deeperthan'], c['areaatrisk']) for c in counts])
response['high_risk_area']=round((temp.get('271 cm', 0) or 0)/1000000,1)
response['med_risk_area']=round((temp.get('121 cm', 0) or 0)/1000000,1)
response['low_risk_area']=round((temp.get('029 cm', 0) or 0)/1000000,1)
response['total_risk_area']=round(response['high_risk_area']+response['med_risk_area']+response['low_risk_area'],2)
# buildings at flood risk
temp = dict([(c['deeperthan'], c['houseatrisk']) for c in counts])
response['total_risk_buildings'] = 0
response['total_risk_buildings']+=temp.get('271 cm', 0) or 0
response['total_risk_buildings']+=temp.get('121 cm', 0) or 0
response['total_risk_buildings']+=temp.get('029 cm', 0) or 0
counts = getRiskNumber(targetRiskIncludeWater.exclude(mitigated_pop__gt=0), filterLock, 'agg_simplified_description', 'fldarea_population', 'fldarea_sqm', 'area_buildings', flag, code, None)
# population at risk per landcover class
temp = dict([(c['agg_simplified_description'], c['count']) for c in counts])
response['water_body_pop_risk']=round(temp.get('Water body and Marshland', 0) or 0,0)
response['barren_land_pop_risk']=round(temp.get('Barren land', 0) or 0,0)
response['built_up_pop_risk']=round(temp.get('Build Up', 0) or 0,0)
response['fruit_trees_pop_risk']=round(temp.get('Fruit Trees', 0) or 0,0)
response['irrigated_agricultural_land_pop_risk']=round(temp.get('Irrigated Agricultural Land', 0) or 0,0)
response['permanent_snow_pop_risk']=round(temp.get('Snow', 0) or 0,0)
response['rainfed_agricultural_land_pop_risk']=round(temp.get('Rainfed', 0) or 0,0)
response['rangeland_pop_risk']=round(temp.get('Rangeland', 0) or 0,0)
response['sandcover_pop_risk']=round(temp.get('Sand Covered Areas', 0) or 0,0)
response['vineyards_pop_risk']=round(temp.get('Vineyards', 0) or 0,0)
response['forest_pop_risk']=round(temp.get('Forest & Shrub', 0) or 0,0)
response['sand_dunes_pop_risk']=round(temp.get('Sand Dunes', 0) or 0,0)
temp = dict([(c['agg_simplified_description'], c['areaatrisk']) for c in counts])
response['water_body_area_risk']=round((temp.get('Water body and Marshland', 0) or 0)/1000000,1)
response['barren_land_area_risk']=round((temp.get('Barren land', 0) or 0)/1000000,1)
response['built_up_area_risk']=round((temp.get('Build Up', 0) or 0)/1000000,1)
response['fruit_trees_area_risk']=round((temp.get('Fruit Trees', 0) or 0)/1000000,1)
response['irrigated_agricultural_land_area_risk']=round((temp.get('Irrigated Agricultural Land', 0) or 0)/1000000,1)
response['permanent_snow_area_risk']=round((temp.get('Snow', 0) or 0)/1000000,1)
response['rainfed_agricultural_land_area_risk']=round((temp.get('Rainfed', 0) or 0)/1000000,1)
response['rangeland_area_risk']=round((temp.get('Rangeland', 0) or 0)/1000000,1)
response['sandcover_area_risk']=round((temp.get('Sand Covered Areas', 0) or 0)/1000000,1)
response['vineyards_area_risk']=round((temp.get('Vineyards', 0) or 0)/1000000,1)
response['forest_area_risk']=round((temp.get('Forest & Shrub', 0) or 0)/1000000,1)
response['sand_dunes_area_risk']=round((temp.get('Sand Dunes', 0) or 0)/1000000,1)
# landcover all
counts = getRiskNumber(targetBase, filterLock, 'agg_simplified_description', 'area_population', 'area_sqm', 'area_buildings', flag, code, None)
temp = dict([(c['agg_simplified_description'], c['count']) for c in counts])
response['water_body_pop']=round(temp.get('Water body and Marshland', 0),0)
response['barren_land_pop']=round(temp.get('Barren land', 0),0)
response['built_up_pop']=round(temp.get('Build Up', 0),0)
response['fruit_trees_pop']=round(temp.get('Fruit Trees', 0),0)
response['irrigated_agricultural_land_pop']=round(temp.get('Irrigated Agricultural Land', 0),0)
response['permanent_snow_pop']=round(temp.get('Snow', 0),0)
response['rainfed_agricultural_land_pop']=round(temp.get('Rainfed', 0),0)
response['rangeland_pop']=round(temp.get('Rangeland', 0),0)
response['sandcover_pop']=round(temp.get('Sand Covered Areas', 0),0)
response['vineyards_pop']=round(temp.get('Vineyards', 0),0)
response['forest_pop']=round(temp.get('Forest & Shrub', 0),0)
response['sand_dunes_pop']=round(temp.get('Sand Dunes', 0),0)
temp = dict([(c['agg_simplified_description'], c['areaatrisk']) for c in counts])
response['water_body_area']=round(temp.get('Water body and Marshland', 0)/1000000,1)
response['barren_land_area']=round(temp.get('Barren land', 0)/1000000,1)
response['built_up_area']=round(temp.get('Build Up', 0)/1000000,1)
response['fruit_trees_area']=round(temp.get('Fruit Trees', 0)/1000000,1)
response['irrigated_agricultural_land_area']=round(temp.get('Irrigated Agricultural Land', 0)/1000000,1)
response['permanent_snow_area']=round(temp.get('Snow', 0)/1000000,1)
response['rainfed_agricultural_land_area']=round(temp.get('Rainfed', 0)/1000000,1)
response['rangeland_area']=round(temp.get('Rangeland', 0)/1000000,1)
response['sandcover_area']=round(temp.get('Sand Covered Areas', 0)/1000000,1)
response['vineyards_area']=round(temp.get('Vineyards', 0)/1000000,1)
response['forest_area']=round(temp.get('Forest & Shrub', 0)/1000000,1)
response['sand_dunes_area']=round(temp.get('Sand Dunes', 0)/1000000,1)
# total buildings
temp = dict([(c['agg_simplified_description'], c['houseatrisk']) for c in counts])
response['total_buildings'] = 0
response['total_buildings']+=temp.get('Water body and Marshland', 0) or 0
response['total_buildings']+=temp.get('Barren land', 0) or 0
response['total_buildings']+=temp.get('Build Up', 0) or 0
response['total_buildings']+=temp.get('Fruit Trees', 0) or 0
response['total_buildings']+=temp.get('Irrigated Agricultural Land', 0) or 0
response['total_buildings']+=temp.get('Snow', 0) or 0
response['total_buildings']+=temp.get('Rainfed', 0) or 0
response['total_buildings']+=temp.get('Rangeland', 0) or 0
response['total_buildings']+=temp.get('Sand Covered Areas', 0) or 0
response['total_buildings']+=temp.get('Vineyards', 0) or 0
response['total_buildings']+=temp.get('Forest & Shrub', 0) or 0
response['total_buildings']+=temp.get('Sand Dunes', 0) or 0
# Number of settlements at risk of flood
if flag=='drawArea':
countsBase = targetRisk.exclude(mitigated_pop__gt=0).filter(agg_simplified_description='Build Up').extra(
select={
'numbersettlementsatrisk': 'count(distinct vuid)'},
where = {'st_area(st_intersection(wkb_geometry,'+filterLock+')) / st_area(wkb_geometry)*fldarea_sqm > 1 and ST_Intersects(wkb_geometry, '+filterLock+')'}).values('numbersettlementsatrisk')
elif flag=='entireAfg':
countsBase = targetRisk.exclude(mitigated_pop__gt=0).filter(agg_simplified_description='Build Up').extra(
select={
'numbersettlementsatrisk': 'count(distinct vuid)'}).values('numbersettlementsatrisk')
elif flag=='currentProvince':
if len(str(code)) > 2:
ff0001 = "dist_code = '"+str(code)+"'"
else :
ff0001 = "prov_code = '"+str(code)+"'"
countsBase = targetRisk.exclude(mitigated_pop__gt=0).filter(agg_simplified_description='Build Up').extra(
select={
'numbersettlementsatrisk': 'count(distinct vuid)'},
where = {ff0001}).values('numbersettlementsatrisk')
elif flag=='currentBasin':
countsBase = targetRisk.exclude(mitigated_pop__gt=0).filter(agg_simplified_description='Build Up').extra(
select={
'numbersettlementsatrisk': 'count(distinct vuid)'},
where = {"vuid = '"+str(code)+"'"}).values('numbersettlementsatrisk')
else:
countsBase = targetRisk.exclude(mitigated_pop__gt=0).filter(agg_simplified_description='Build Up').extra(
select={
'numbersettlementsatrisk': 'count(distinct vuid)'},
where = {'ST_Within(wkb_geometry, '+filterLock+')'}).values('numbersettlementsatrisk')
response['settlements_at_risk'] = round(countsBase[0]['numbersettlementsatrisk'],0)
# number of all settlements
if flag=='drawArea':
countsBase = targetBase.exclude(agg_simplified_description='Water body and Marshland').extra(
select={
'numbersettlements': 'count(distinct vuid)'},
where = {'st_area(st_intersection(wkb_geometry,'+filterLock+')) / st_area(wkb_geometry)*area_sqm > 1 and ST_Intersects(wkb_geometry, '+filterLock+')'}).values('numbersettlements')
elif flag=='entireAfg':
countsBase = targetBase.exclude(agg_simplified_description='Water body and Marshland').extra(
select={
'numbersettlements': 'count(distinct vuid)'}).values('numbersettlements')
elif flag=='currentProvince':
if len(str(code)) > 2:
ff0001 = "dist_code = '"+str(code)+"'"
else :
ff0001 = "prov_code = '"+str(code)+"'"
countsBase = targetBase.exclude(agg_simplified_description='Water body and Marshland').extra(
select={
'numbersettlements': 'count(distinct vuid)'},
where = {ff0001}).values('numbersettlements')
elif flag=='currentBasin':
countsBase = targetBase.exclude(agg_simplified_description='Water body and Marshland').extra(
select={
'numbersettlements': 'count(distinct vuid)'},
where = {"vuid = '"+str(code)+"'"}).values('numbersettlements')
else:
countsBase = targetBase.exclude(agg_simplified_description='Water body and Marshland').extra(
select={
'numbersettlements': 'count(distinct vuid)'},
where = {'ST_Within(wkb_geometry, '+filterLock+')'}).values('numbersettlements')
response['settlements'] = round(countsBase[0]['numbersettlements'],0)
# All population number
if flag=='drawArea':
countsBase = targetBase.extra(
select={
'countbase' : 'SUM( \
case \
when ST_CoveredBy(wkb_geometry,'+filterLock+') then area_population \
else st_area(st_intersection(wkb_geometry,'+filterLock+')) / st_area(wkb_geometry)*area_population end \
)'
},
where = {
'ST_Intersects(wkb_geometry, '+filterLock+')'
}).values('countbase')
elif flag=='entireAfg':
countsBase = targetBase.extra(
select={
'countbase' : 'SUM(area_population)'
}).values('countbase')
elif flag=='currentProvince':
if len(str(code)) > 2:
ff0001 = "dist_code = '"+str(code)+"'"
else :
ff0001 = "prov_code = '"+str(code)+"'"
countsBase = targetBase.extra(
select={
'countbase' : 'SUM(area_population)'
},
where = {
ff0001
}).values('countbase')
elif flag=='currentBasin':
countsBase = targetBase.extra(
select={
'countbase' : 'SUM(area_population)'
},
where = {"vuid = '"+str(code)+"'"}).values('countbase')
else:
countsBase = targetBase.extra(
select={
'countbase' : 'SUM(area_population)'
},
where = {
'ST_Within(wkb_geometry, '+filterLock+')'
}).values('countbase')
response['Population']=round(countsBase[0]['countbase'],0)
if flag=='drawArea':
countsBase = targetBase.extra(
select={
'areabase' : 'SUM( \
case \
when ST_CoveredBy(wkb_geometry,'+filterLock+') then area_sqm \
else st_area(st_intersection(wkb_geometry,'+filterLock+')) / st_area(wkb_geometry)*area_sqm end \
)'
},
where = {
'ST_Intersects(wkb_geometry, '+filterLock+')'
}).values('areabase')
elif flag=='entireAfg':
countsBase = targetBase.extra(
select={
'areabase' : 'SUM(area_sqm)'
}).values('areabase')
elif flag=='currentProvince':
if len(str(code)) > 2:
ff0001 = "dist_code = '"+str(code)+"'"
else :
ff0001 = "prov_code = '"+str(code)+"'"
countsBase = targetBase.extra(
select={
'areabase' : 'SUM(area_sqm)'
},
where = {
ff0001
}).values('areabase')
elif flag=='currentBasin':
countsBase = targetBase.extra(
select={
'areabase' : 'SUM(area_sqm)'
},
where = {"vuid = '"+str(code)+"'"}).values('areabase')
else:
countsBase = targetBase.extra(
select={
'areabase' : 'SUM(area_sqm)'
},
where = {
'ST_Within(wkb_geometry, '+filterLock+')'
}).values('areabase')
response['Area']=round(countsBase[0]['areabase']/1000000,0)
else:
if flag=='entireAfg':
px = provincesummary.objects.aggregate(Sum('high_ava_population'),Sum('med_ava_population'),Sum('low_ava_population'),Sum('total_ava_population'),Sum('high_ava_area'),Sum('med_ava_area'),Sum('low_ava_area'),Sum('total_ava_area'), \
Sum('high_risk_population'),Sum('med_risk_population'),Sum('low_risk_population'),Sum('total_risk_population'), Sum('high_risk_area'),Sum('med_risk_area'),Sum('low_risk_area'),Sum('total_risk_area'), \
Sum('water_body_pop_risk'),Sum('barren_land_pop_risk'),Sum('built_up_pop_risk'),Sum('fruit_trees_pop_risk'),Sum('irrigated_agricultural_land_pop_risk'),Sum('permanent_snow_pop_risk'),Sum('rainfed_agricultural_land_pop_risk'),Sum('rangeland_pop_risk'),Sum('sandcover_pop_risk'),Sum('vineyards_pop_risk'),Sum('forest_pop_risk'), Sum('sand_dunes_pop_risk'), \
Sum('water_body_area_risk'),Sum('barren_land_area_risk'),Sum('built_up_area_risk'),Sum('fruit_trees_area_risk'),Sum('irrigated_agricultural_land_area_risk'),Sum('permanent_snow_area_risk'),Sum('rainfed_agricultural_land_area_risk'),Sum('rangeland_area_risk'),Sum('sandcover_area_risk'),Sum('vineyards_area_risk'),Sum('forest_area_risk'), Sum('sand_dunes_area_risk'), \
Sum('water_body_pop'),Sum('barren_land_pop'),Sum('built_up_pop'),Sum('fruit_trees_pop'),Sum('irrigated_agricultural_land_pop'),Sum('permanent_snow_pop'),Sum('rainfed_agricultural_land_pop'),Sum('rangeland_pop'),Sum('sandcover_pop'),Sum('vineyards_pop'),Sum('forest_pop'), Sum('sand_dunes_pop'), \
Sum('water_body_area'),Sum('barren_land_area'),Sum('built_up_area'),Sum('fruit_trees_area'),Sum('irrigated_agricultural_land_area'),Sum('permanent_snow_area'),Sum('rainfed_agricultural_land_area'),Sum('rangeland_area'),Sum('sandcover_area'),Sum('vineyards_area'),Sum('forest_area'), Sum('sand_dunes_area'), \
Sum('settlements_at_risk'), Sum('settlements'), Sum('Population'), Sum('Area'), Sum('ava_forecast_low_pop'), Sum('ava_forecast_med_pop'), Sum('ava_forecast_high_pop'), Sum('total_ava_forecast_pop'),
Sum('total_buildings'), Sum('total_risk_buildings'), Sum('high_ava_buildings'), Sum('med_ava_buildings'), Sum('total_ava_buildings') )
else:
if len(str(code)) > 2:
px = districtsummary.objects.filter(district=code).aggregate(Sum('high_ava_population'),Sum('med_ava_population'),Sum('low_ava_population'),Sum('total_ava_population'),Sum('high_ava_area'),Sum('med_ava_area'),Sum('low_ava_area'),Sum('total_ava_area'), \
Sum('high_risk_population'),Sum('med_risk_population'),Sum('low_risk_population'),Sum('total_risk_population'), Sum('high_risk_area'),Sum('med_risk_area'),Sum('low_risk_area'),Sum('total_risk_area'), \
Sum('water_body_pop_risk'),Sum('barren_land_pop_risk'),Sum('built_up_pop_risk'),Sum('fruit_trees_pop_risk'),Sum('irrigated_agricultural_land_pop_risk'),Sum('permanent_snow_pop_risk'),Sum('rainfed_agricultural_land_pop_risk'),Sum('rangeland_pop_risk'),Sum('sandcover_pop_risk'),Sum('vineyards_pop_risk'),Sum('forest_pop_risk'), Sum('sand_dunes_pop_risk'), \
Sum('water_body_area_risk'),Sum('barren_land_area_risk'),Sum('built_up_area_risk'),Sum('fruit_trees_area_risk'),Sum('irrigated_agricultural_land_area_risk'),Sum('permanent_snow_area_risk'),Sum('rainfed_agricultural_land_area_risk'),Sum('rangeland_area_risk'),Sum('sandcover_area_risk'),Sum('vineyards_area_risk'),Sum('forest_area_risk'), Sum('sand_dunes_area_risk'), \
Sum('water_body_pop'),Sum('barren_land_pop'),Sum('built_up_pop'),Sum('fruit_trees_pop'),Sum('irrigated_agricultural_land_pop'),Sum('permanent_snow_pop'),Sum('rainfed_agricultural_land_pop'),Sum('rangeland_pop'),Sum('sandcover_pop'),Sum('vineyards_pop'),Sum('forest_pop'), Sum('sand_dunes_pop'), \
Sum('water_body_area'),Sum('barren_land_area'),Sum('built_up_area'),Sum('fruit_trees_area'),Sum('irrigated_agricultural_land_area'),Sum('permanent_snow_area'),Sum('rainfed_agricultural_land_area'),Sum('rangeland_area'),Sum('sandcover_area'),Sum('vineyards_area'),Sum('forest_area'), Sum('sand_dunes_area'), \
Sum('settlements_at_risk'), Sum('settlements'), Sum('Population'), Sum('Area'), Sum('ava_forecast_low_pop'), Sum('ava_forecast_med_pop'), Sum('ava_forecast_high_pop'), Sum('total_ava_forecast_pop'),
Sum('total_buildings'), Sum('total_risk_buildings'), Sum('high_ava_buildings'), Sum('med_ava_buildings'), Sum('total_ava_buildings') )
else :
px = provincesummary.objects.filter(province=code).aggregate(Sum('high_ava_population'),Sum('med_ava_population'),Sum('low_ava_population'),Sum('total_ava_population'),Sum('high_ava_area'),Sum('med_ava_area'),Sum('low_ava_area'),Sum('total_ava_area'), \
Sum('high_risk_population'),Sum('med_risk_population'),Sum('low_risk_population'),Sum('total_risk_population'), Sum('high_risk_area'),Sum('med_risk_area'),Sum('low_risk_area'),Sum('total_risk_area'), \
Sum('water_body_pop_risk'),Sum('barren_land_pop_risk'),Sum('built_up_pop_risk'),Sum('fruit_trees_pop_risk'),Sum('irrigated_agricultural_land_pop_risk'),Sum('permanent_snow_pop_risk'),Sum('rainfed_agricultural_land_pop_risk'),Sum('rangeland_pop_risk'),Sum('sandcover_pop_risk'),Sum('vineyards_pop_risk'),Sum('forest_pop_risk'), Sum('sand_dunes_pop_risk'), \
Sum('water_body_area_risk'),Sum('barren_land_area_risk'),Sum('built_up_area_risk'),Sum('fruit_trees_area_risk'),Sum('irrigated_agricultural_land_area_risk'),Sum('permanent_snow_area_risk'),Sum('rainfed_agricultural_land_area_risk'),Sum('rangeland_area_risk'),Sum('sandcover_area_risk'),Sum('vineyards_area_risk'),Sum('forest_area_risk'), Sum('sand_dunes_area_risk'), \
Sum('water_body_pop'),Sum('barren_land_pop'),Sum('built_up_pop'),Sum('fruit_trees_pop'),Sum('irrigated_agricultural_land_pop'),Sum('permanent_snow_pop'),Sum('rainfed_agricultural_land_pop'),Sum('rangeland_pop'),Sum('sandcover_pop'),Sum('vineyards_pop'),Sum('forest_pop'), Sum('sand_dunes_pop'), \
Sum('water_body_area'),Sum('barren_land_area'),Sum('built_up_area'),Sum('fruit_trees_area'),Sum('irrigated_agricultural_land_area'),Sum('permanent_snow_area'),Sum('rainfed_agricultural_land_area'),Sum('rangeland_area'),Sum('sandcover_area'),Sum('vineyards_area'),Sum('forest_area'), Sum('sand_dunes_area'), \
Sum('settlements_at_risk'), Sum('settlements'), Sum('Population'), Sum('Area'), Sum('ava_forecast_low_pop'), Sum('ava_forecast_med_pop'), Sum('ava_forecast_high_pop'), Sum('total_ava_forecast_pop'),
Sum('total_buildings'), Sum('total_risk_buildings'), Sum('high_ava_buildings'), Sum('med_ava_buildings'), Sum('total_ava_buildings') )
for p in px:
response[p[:-5]] = px[p]
# Avalanche Forecasted
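# Forecasted avalanche exposure: sum population/buildings in avalanche-prone
# areas whose basins carry a 'snowwater' forecast risk state for the requested date.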
sql = ""
if flag=='entireAfg':
sql = "select forcastedvalue.riskstate, \
sum(afg_avsa.avalanche_pop) as pop, \
sum(afg_avsa.area_buildings) as building \
FROM afg_avsa \
INNER JOIN current_sc_basins ON (ST_WITHIN(ST_Centroid(afg_avsa.wkb_geometry), current_sc_basins.wkb_geometry)) \
INNER JOIN afg_sheda_lvl4 ON ( afg_avsa.basinmember_id = afg_sheda_lvl4.ogc_fid ) \
INNER JOIN forcastedvalue ON ( afg_sheda_lvl4.ogc_fid = forcastedvalue.basin_id ) \
WHERE (NOT (afg_avsa.basinmember_id IN (SELECT U1.ogc_fid FROM afg_sheda_lvl4 U1 LEFT OUTER JOIN forcastedvalue U2 ON ( U1.ogc_fid = U2.basin_id ) WHERE U2.riskstate IS NULL)) \
AND forcastedvalue.datadate = '%s-%s-%s' \
AND forcastedvalue.forecasttype = 'snowwater' ) \
GROUP BY forcastedvalue.riskstate" %(YEAR,MONTH,DAY)
# cursor.execute("select forcastedvalue.riskstate, \
# sum(afg_avsa.avalanche_pop) as pop, \
# sum(afg_avsa.area_buildings) as building \
# FROM afg_avsa \
# INNER JOIN current_sc_basins ON (ST_WITHIN(ST_Centroid(afg_avsa.wkb_geometry), current_sc_basins.wkb_geometry)) \
# INNER JOIN afg_sheda_lvl4 ON ( afg_avsa.basinmember_id = afg_sheda_lvl4.ogc_fid ) \
# INNER JOIN forcastedvalue ON ( afg_sheda_lvl4.ogc_fid = forcastedvalue.basin_id ) \
# WHERE (NOT (afg_avsa.basinmember_id IN (SELECT U1.ogc_fid FROM afg_sheda_lvl4 U1 LEFT OUTER JOIN forcastedvalue U2 ON ( U1.ogc_fid = U2.basin_id ) WHERE U2.riskstate IS NULL)) \
# AND forcastedvalue.datadate = '%s-%s-%s' \
# AND forcastedvalue.forecasttype = 'snowwater' ) \
# GROUP BY forcastedvalue.riskstate" %(YEAR,MONTH,DAY))
# row = cursor.fetchall()
# cursor.close()
elif flag=='currentProvince':
if len(str(code)) > 2:
ff0001 = "dist_code = '"+str(code)+"'"
else :
ff0001 = "prov_code = '"+str(code)+"'"
sql = "select forcastedvalue.riskstate, \
sum(afg_avsa.avalanche_pop) as pop, \
sum(afg_avsa.area_buildings) as building \
FROM afg_avsa \
INNER JOIN current_sc_basins ON (ST_WITHIN(ST_Centroid(afg_avsa.wkb_geometry), current_sc_basins.wkb_geometry)) \
INNER JOIN afg_sheda_lvl4 ON ( afg_avsa.basinmember_id = afg_sheda_lvl4.ogc_fid ) \
INNER JOIN forcastedvalue ON ( afg_sheda_lvl4.ogc_fid = forcastedvalue.basin_id ) \
WHERE (NOT (afg_avsa.basinmember_id IN (SELECT U1.ogc_fid FROM afg_sheda_lvl4 U1 LEFT OUTER JOIN forcastedvalue U2 ON ( U1.ogc_fid = U2.basin_id ) WHERE U2.riskstate IS NULL)) \
AND forcastedvalue.datadate = '%s-%s-%s' \
AND forcastedvalue.forecasttype = 'snowwater' ) \
and afg_avsa.%s \
GROUP BY forcastedvalue.riskstate" %(YEAR,MONTH,DAY,ff0001)
# cursor.execute("select forcastedvalue.riskstate, \
# sum(afg_avsa.avalanche_pop) as pop, \
# sum(afg_avsa.area_buildings) as building \
# FROM afg_avsa \
# INNER JOIN current_sc_basins ON (ST_WITHIN(ST_Centroid(afg_avsa.wkb_geometry), current_sc_basins.wkb_geometry)) \
# INNER JOIN afg_sheda_lvl4 ON ( afg_avsa.basinmember_id = afg_sheda_lvl4.ogc_fid ) \
# INNER JOIN forcastedvalue ON ( afg_sheda_lvl4.ogc_fid = forcastedvalue.basin_id ) \
# WHERE (NOT (afg_avsa.basinmember_id IN (SELECT U1.ogc_fid FROM afg_sheda_lvl4 U1 LEFT OUTER JOIN forcastedvalue U2 ON ( U1.ogc_fid = U2.basin_id ) WHERE U2.riskstate IS NULL)) \
# AND forcastedvalue.datadate = '%s-%s-%s' \
# AND forcastedvalue.forecasttype = 'snowwater' ) \
# and afg_avsa.%s \
# GROUP BY forcastedvalue.riskstate" %(YEAR,MONTH,DAY,ff0001))
# row = cursor.fetchall()
# cursor.close()
elif flag=='drawArea':
sql = "select forcastedvalue.riskstate, \
sum(case \
when ST_CoveredBy(afg_avsa.wkb_geometry , %s) then afg_avsa.avalanche_pop \
else st_area(st_intersection(afg_avsa.wkb_geometry, %s)) / st_area(afg_avsa.wkb_geometry)* avalanche_pop end \
) as pop, \
sum(case \
when ST_CoveredBy(afg_avsa.wkb_geometry , %s) then afg_avsa.area_buildings \
else st_area(st_intersection(afg_avsa.wkb_geometry, %s)) / st_area(afg_avsa.wkb_geometry)* area_buildings end \
) as building \
FROM afg_avsa \
INNER JOIN current_sc_basins ON (ST_WITHIN(ST_Centroid(afg_avsa.wkb_geometry), current_sc_basins.wkb_geometry)) \
INNER JOIN afg_sheda_lvl4 ON ( afg_avsa.basinmember_id = afg_sheda_lvl4.ogc_fid ) \
INNER JOIN forcastedvalue ON ( afg_sheda_lvl4.ogc_fid = forcastedvalue.basin_id ) \
WHERE (NOT (afg_avsa.basinmember_id IN (SELECT U1.ogc_fid FROM afg_sheda_lvl4 U1 LEFT OUTER JOIN forcastedvalue U2 ON ( U1.ogc_fid = U2.basin_id ) WHERE U2.riskstate IS NULL)) \
AND forcastedvalue.datadate = '%s-%s-%s' \
AND forcastedvalue.forecasttype = 'snowwater' ) \
GROUP BY forcastedvalue.riskstate" %(filterLock,filterLock, filterLock,filterLock,YEAR,MONTH,DAY)
# cursor.execute("select forcastedvalue.riskstate, \
# sum(case \
# when ST_CoveredBy(afg_avsa.wkb_geometry , %s) then afg_avsa.avalanche_pop \
# else st_area(st_intersection(afg_avsa.wkb_geometry, %s)) / st_area(afg_avsa.wkb_geometry)* avalanche_pop end \
# ) as pop, \
# sum(case \
# when ST_CoveredBy(afg_avsa.wkb_geometry , %s) then afg_avsa.area_buildings \
# else st_area(st_intersection(afg_avsa.wkb_geometry, %s)) / st_area(afg_avsa.wkb_geometry)* area_buildings end \
# ) as building \
# FROM afg_avsa \
# INNER JOIN current_sc_basins ON (ST_WITHIN(ST_Centroid(afg_avsa.wkb_geometry), current_sc_basins.wkb_geometry)) \
# INNER JOIN afg_sheda_lvl4 ON ( afg_avsa.basinmember_id = afg_sheda_lvl4.ogc_fid ) \
# INNER JOIN forcastedvalue ON ( afg_sheda_lvl4.ogc_fid = forcastedvalue.basin_id ) \
# WHERE (NOT (afg_avsa.basinmember_id IN (SELECT U1.ogc_fid FROM afg_sheda_lvl4 U1 LEFT OUTER JOIN forcastedvalue U2 ON ( U1.ogc_fid = U2.basin_id ) WHERE U2.riskstate IS NULL)) \
# AND forcastedvalue.datadate = '%s-%s-%s' \
# AND forcastedvalue.forecasttype = 'snowwater' ) \
# GROUP BY forcastedvalue.riskstate" %(filterLock,filterLock,YEAR,MONTH,DAY))
# row = cursor.fetchall()
# cursor.close()
else:
sql = "select forcastedvalue.riskstate, \
sum(afg_avsa.avalanche_pop) as pop, \
sum(afg_avsa.area_buildings) as building \
FROM afg_avsa \
INNER JOIN current_sc_basins ON (ST_WITHIN(ST_Centroid(afg_avsa.wkb_geometry), current_sc_basins.wkb_geometry)) \
INNER JOIN afg_sheda_lvl4 ON ( afg_avsa.basinmember_id = afg_sheda_lvl4.ogc_fid ) \
INNER JOIN forcastedvalue ON ( afg_sheda_lvl4.ogc_fid = forcastedvalue.basin_id ) \
WHERE (NOT (afg_avsa.basinmember_id IN (SELECT U1.ogc_fid FROM afg_sheda_lvl4 U1 LEFT OUTER JOIN forcastedvalue U2 ON ( U1.ogc_fid = U2.basin_id ) WHERE U2.riskstate IS NULL)) \
AND forcastedvalue.datadate = '%s-%s-%s' \
AND forcastedvalue.forecasttype = 'snowwater' ) \
AND ST_Within(afg_avsa.wkb_geometry, %s) \
GROUP BY forcastedvalue.riskstate" %(YEAR,MONTH,DAY,filterLock)
# cursor.execute("select forcastedvalue.riskstate, \
# sum(afg_avsa.avalanche_pop) as pop, \
# sum(afg_avsa.area_buildings) as building \
# FROM afg_avsa \
# INNER JOIN current_sc_basins ON (ST_WITHIN(ST_Centroid(afg_avsa.wkb_geometry), current_sc_basins.wkb_geometry)) \
# INNER JOIN afg_sheda_lvl4 ON ( afg_avsa.basinmember_id = afg_sheda_lvl4.ogc_fid ) \
# INNER JOIN forcastedvalue ON ( afg_sheda_lvl4.ogc_fid = forcastedvalue.basin_id ) \
# WHERE (NOT (afg_avsa.basinmember_id IN (SELECT U1.ogc_fid FROM afg_sheda_lvl4 U1 LEFT OUTER JOIN forcastedvalue U2 ON ( U1.ogc_fid = U2.basin_id ) WHERE U2.riskstate IS NULL)) \
# AND forcastedvalue.datadate = '%s-%s-%s' \
# AND forcastedvalue.forecasttype = 'snowwater' ) \
# AND ST_Within(afg_avsa.wkb_geometry, %s) \
# GROUP BY forcastedvalue.riskstate" %(YEAR,MONTH,DAY,filterLock))
# row = cursor.fetchall()
# cursor.close()
cursor = connections['geodb'].cursor()
counts = list(query_to_dicts(cursor, sql))
cursor.close()
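# each row is a dict like {'riskstate': 1|2|3, 'pop': <float>, 'building': <float>}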
temp = dict([(c['riskstate'], c['pop']) for c in counts])
response['ava_forecast_low_pop']=round(temp.get(1, 0) or 0,0)
response['ava_forecast_med_pop']=round(temp.get(2, 0) or 0,0)
response['ava_forecast_high_pop']=round(temp.get(3, 0) or 0,0)
response['total_ava_forecast_pop']=response['ava_forecast_low_pop'] + response['ava_forecast_med_pop'] + response['ava_forecast_high_pop']
# avalanche forecast buildings
temp = dict([(c['riskstate'], c['building']) for c in counts])
response['ava_forecast_low_buildings']=round(temp.get(1, 0) or 0,0)
response['ava_forecast_med_buildings']=round(temp.get(2, 0) or 0,0)
response['ava_forecast_high_buildings']=round(temp.get(3, 0) or 0,0)
response['total_ava_forecast_buildings']=response['ava_forecast_low_buildings'] + response['ava_forecast_med_buildings'] + response['ava_forecast_high_buildings']
counts = getRiskNumber(targetRisk.exclude(mitigated_pop=0), filterLock, 'deeperthan', 'mitigated_pop', 'fldarea_sqm', 'area_buildings', flag, code, None)
temp = dict([(c['deeperthan'], c['count']) for c in counts])
response['high_risk_mitigated_population']=round(temp.get('271 cm', 0) or 0,0)
response['med_risk_mitigated_population']=round(temp.get('121 cm', 0) or 0, 0)
response['low_risk_mitigated_population']=round(temp.get('029 cm', 0) or 0,0)
response['total_risk_mitigated_population']=response['high_risk_mitigated_population']+response['med_risk_mitigated_population']+response['low_risk_mitigated_population']
# River Flood Forecasted
if rf_type == 'GFMS only':
bring = filterLock
temp_result = getFloodForecastBySource(rf_type, targetRisk, bring, flag, code, YEAR, MONTH, DAY)
for item in temp_result:
response[item]=temp_result[item]
# Flash Flood Forecasted: join the flood-risk polygons to the forecasted basin
# risk states via basinmember__basins (select_related on "basinmembers")
counts = getRiskNumber(targetRisk.exclude(mitigated_pop__gt=0).select_related("basinmembers").defer('basinmember__wkb_geometry').exclude(basinmember__basins__riskstate=None).filter(basinmember__basins__forecasttype='flashflood',basinmember__basins__datadate='%s-%s-%s' %(YEAR,MONTH,DAY)), filterLock, 'basinmember__basins__riskstate', 'fldarea_population', 'fldarea_sqm', 'area_buildings', flag, code, 'afg_fldzonea_100k_risk_landcover_pop')
temp = dict([(c['basinmember__basins__riskstate'], c['count']) for c in counts])
response['flashflood_forecast_verylow_pop']=round(temp.get(1, 0) or 0,0)
response['flashflood_forecast_low_pop']=round(temp.get(2, 0) or 0,0)
response['flashflood_forecast_med_pop']=round(temp.get(3, 0) or 0,0)
response['flashflood_forecast_high_pop']=round(temp.get(4, 0) or 0,0)
response['flashflood_forecast_veryhigh_pop']=round(temp.get(5, 0) or 0,0)
response['flashflood_forecast_extreme_pop']=round(temp.get(6, 0) or 0,0)
response['total_flashflood_forecast_pop']=response['flashflood_forecast_verylow_pop'] + response['flashflood_forecast_low_pop'] + response['flashflood_forecast_med_pop'] + response['flashflood_forecast_high_pop'] + response['flashflood_forecast_veryhigh_pop'] + response['flashflood_forecast_extreme_pop']
temp = dict([(c['basinmember__basins__riskstate'], c['areaatrisk']) for c in counts])
response['flashflood_forecast_verylow_area']=round((temp.get(1, 0) or 0)/1000000,0)
response['flashflood_forecast_low_area']=round((temp.get(2, 0) or 0)/1000000,0)
response['flashflood_forecast_med_area']=round((temp.get(3, 0) or 0)/1000000,0)
response['flashflood_forecast_high_area']=round((temp.get(4, 0) or 0)/1000000,0)
response['flashflood_forecast_veryhigh_area']=round((temp.get(5, 0) or 0)/1000000,0)
response['flashflood_forecast_extreme_area']=round((temp.get(6, 0) or 0)/1000000,0)
response['total_flashflood_forecast_area']=response['flashflood_forecast_verylow_area'] + response['flashflood_forecast_low_area'] + response['flashflood_forecast_med_area'] + response['flashflood_forecast_high_area'] + response['flashflood_forecast_veryhigh_area'] + response['flashflood_forecast_extreme_area']
# number of buildings at risk in the flash flood forecast
temp = dict([(c['basinmember__basins__riskstate'], c['houseatrisk']) for c in counts])
response['flashflood_forecast_verylow_buildings']=round(temp.get(1, 0) or 0,0)
response['flashflood_forecast_low_buildings']=round(temp.get(2, 0) or 0,0)
response['flashflood_forecast_med_buildings']=round(temp.get(3, 0) or 0,0)
response['flashflood_forecast_high_buildings']=round(temp.get(4, 0) or 0,0)
response['flashflood_forecast_veryhigh_buildings']=round(temp.get(5, 0) or 0,0)
response['flashflood_forecast_extreme_buildings']=round(temp.get(6, 0) or 0,0)
response['total_flashflood_forecast_buildings']=response['flashflood_forecast_verylow_buildings'] + response['flashflood_forecast_low_buildings'] + response['flashflood_forecast_med_buildings'] + response['flashflood_forecast_high_buildings'] + response['flashflood_forecast_veryhigh_buildings'] + response['flashflood_forecast_extreme_buildings']
response['total_flood_forecast_pop'] = response['total_riverflood_forecast_pop'] + response['total_flashflood_forecast_pop']
response['total_flood_forecast_area'] = response['total_riverflood_forecast_area'] + response['total_flashflood_forecast_area']
# flood risk and flashflood forecast matrix
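# Cross-tabulate the forecasted flash-flood risk state (1=very low .. 6=extreme)
# against the flood-depth class ('029 cm', '121 cm', '271 cm') for population
# and buildings.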
px = targetRisk.exclude(mitigated_pop__gt=0).select_related("basinmembers").defer('basinmember__wkb_geometry').exclude(basinmember__basins__riskstate=None).filter(basinmember__basins__forecasttype='flashflood',basinmember__basins__datadate='%s-%s-%s' %(YEAR,MONTH,DAY))
if flag=='entireAfg':
px = px.values('basinmember__basins__riskstate','deeperthan').annotate(counter=Count('ogc_fid')).extra(
select={
'pop' : 'SUM(fldarea_population)',
'building' : 'SUM(area_buildings)'
}).values('basinmember__basins__riskstate','deeperthan', 'pop', 'building')
elif flag=='currentProvince':
if len(str(code)) > 2:
ff0001 = "dist_code = '"+str(code)+"'"
else :
if len(str(code))==1:
ff0001 = "left(cast(dist_code as text),1) = '"+str(code)+"'"
else:
ff0001 = "left(cast(dist_code as text),2) = '"+str(code)+"'"
px = px.values('basinmember__basins__riskstate','deeperthan').annotate(counter=Count('ogc_fid')).extra(
select={
'pop' : 'SUM(fldarea_population)',
'building' : 'SUM(area_buildings)'
},where={
ff0001
}).values('basinmember__basins__riskstate','deeperthan', 'pop', 'building')
elif flag=='drawArea':
px = px.values('basinmember__basins__riskstate','deeperthan').annotate(counter=Count('ogc_fid')).extra(
select={
'pop' : 'SUM( \
case \
when ST_CoveredBy(afg_fldzonea_100k_risk_landcover_pop.wkb_geometry ,'+filterLock+') then fldarea_population \
else st_area(st_intersection(afg_fldzonea_100k_risk_landcover_pop.wkb_geometry,'+filterLock+')) / st_area(afg_fldzonea_100k_risk_landcover_pop.wkb_geometry)* fldarea_population end \
)',
'building' : 'SUM( \
case \
when ST_CoveredBy(afg_fldzonea_100k_risk_landcover_pop.wkb_geometry ,'+filterLock+') then area_buildings \
else st_area(st_intersection(afg_fldzonea_100k_risk_landcover_pop.wkb_geometry,'+filterLock+')) / st_area(afg_fldzonea_100k_risk_landcover_pop.wkb_geometry)* area_buildings end \
)'
},
where = {
'ST_Intersects(afg_fldzonea_100k_risk_landcover_pop.wkb_geometry, '+filterLock+')'
}).values('basinmember__basins__riskstate','deeperthan', 'pop', 'building')
else:
px = px.values('basinmember__basins__riskstate','deeperthan').annotate(counter=Count('ogc_fid')).extra(
select={
'pop' : 'SUM(fldarea_population)',
'building' : 'SUM(area_buildings)'
},
where = {
'ST_Within(afg_fldzonea_100k_risk_landcover_pop.wkb_geometry, '+filterLock+')'
}).values('basinmember__basins__riskstate','deeperthan', 'pop', 'building')
tempD = [ num for num in px if num['basinmember__basins__riskstate'] == 1 ]
temp = dict([(c['deeperthan'], c['pop']) for c in tempD])
response['flashflood_forecast_verylow_risk_low_pop']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_verylow_risk_med_pop']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_verylow_risk_high_pop']=round(temp.get('271 cm', 0) or 0,0)
temp = dict([(c['deeperthan'], c['building']) for c in tempD])
response['flashflood_forecast_verylow_risk_low_buildings']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_verylow_risk_med_buildings']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_verylow_risk_high_buildings']=round(temp.get('271 cm', 0) or 0,0)
tempD = [ num for num in px if num['basinmember__basins__riskstate'] == 2 ]
temp = dict([(c['deeperthan'], c['pop']) for c in tempD])
response['flashflood_forecast_low_risk_low_pop']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_low_risk_med_pop']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_low_risk_high_pop']=round(temp.get('271 cm', 0) or 0,0)
temp = dict([(c['deeperthan'], c['building']) for c in tempD])
response['flashflood_forecast_low_risk_low_buildings']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_low_risk_med_buildings']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_low_risk_high_buildings']=round(temp.get('271 cm', 0) or 0,0)
tempD = [ num for num in px if num['basinmember__basins__riskstate'] == 3 ]
temp = dict([(c['deeperthan'], c['pop']) for c in tempD])
response['flashflood_forecast_med_risk_low_pop']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_med_risk_med_pop']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_med_risk_high_pop']=round(temp.get('271 cm', 0) or 0,0)
temp = dict([(c['deeperthan'], c['building']) for c in tempD])
response['flashflood_forecast_med_risk_low_buildings']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_med_risk_med_buildings']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_med_risk_high_buildings']=round(temp.get('271 cm', 0) or 0,0)
tempD = [ num for num in px if num['basinmember__basins__riskstate'] == 4 ]
temp = dict([(c['deeperthan'], c['pop']) for c in tempD])
response['flashflood_forecast_high_risk_low_pop']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_high_risk_med_pop']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_high_risk_high_pop']=round(temp.get('271 cm', 0) or 0,0)
temp = dict([(c['deeperthan'], c['building']) for c in tempD])
response['flashflood_forecast_high_risk_low_buildings']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_high_risk_med_buildings']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_high_risk_high_buildings']=round(temp.get('271 cm', 0) or 0,0)
tempD = [ num for num in px if num['basinmember__basins__riskstate'] == 5 ]
temp = dict([(c['deeperthan'], c['pop']) for c in tempD])
response['flashflood_forecast_veryhigh_risk_low_pop']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_veryhigh_risk_med_pop']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_veryhigh_risk_high_pop']=round(temp.get('271 cm', 0) or 0,0)
temp = dict([(c['deeperthan'], c['building']) for c in tempD])
response['flashflood_forecast_veryhigh_risk_low_buildings']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_veryhigh_risk_med_buildings']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_veryhigh_risk_high_buildings']=round(temp.get('271 cm', 0) or 0,0)
tempD = [ num for num in px if num['basinmember__basins__riskstate'] == 6 ]
temp = dict([(c['deeperthan'], c['pop']) for c in tempD])
response['flashflood_forecast_extreme_risk_low_pop']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_extreme_risk_med_pop']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_extreme_risk_high_pop']=round(temp.get('271 cm', 0) or 0,0)
temp = dict([(c['deeperthan'], c['building']) for c in tempD])
response['flashflood_forecast_extreme_risk_low_buildings']=round(temp.get('029 cm', 0) or 0,0)
response['flashflood_forecast_extreme_risk_med_buildings']=round(temp.get('121 cm', 0) or 0, 0)
response['flashflood_forecast_extreme_risk_high_buildings']=round(temp.get('271 cm', 0) or 0,0)
try:
response['percent_total_risk_population'] = round((response['total_risk_population']/response['Population'])*100,0)
except ZeroDivisionError:
response['percent_total_risk_population'] = 0
try:
response['percent_high_risk_population'] = round((response['high_risk_population']/response['Population'])*100,0)
except ZeroDivisionError:
response['percent_high_risk_population'] = 0
try:
response['percent_med_risk_population'] = round((response['med_risk_population']/response['Population'])*100,0)
except ZeroDivisionError:
response['percent_med_risk_population'] = 0
try:
response['percent_low_risk_population'] = round((response['low_risk_population']/response['Population'])*100,0)
except ZeroDivisionError:
response['percent_low_risk_population'] = 0
try:
response['percent_total_risk_area'] = round((response['total_risk_area']/response['Area'])*100,0)
except ZeroDivisionError:
response['percent_total_risk_area'] = 0
try:
response['percent_high_risk_area'] = round((response['high_risk_area']/response['Area'])*100,0)
except ZeroDivisionError:
response['percent_high_risk_area'] = 0
try:
response['percent_med_risk_area'] = round((response['med_risk_area']/response['Area'])*100,0)
except ZeroDivisionError:
response['percent_med_risk_area'] = 0
try:
response['percent_low_risk_area'] = round((response['low_risk_area']/response['Area'])*100,0)
except ZeroDivisionError:
response['percent_low_risk_area'] = 0
try:
response['percent_total_ava_population'] = round((response['total_ava_population']/response['Population'])*100,0)
except ZeroDivisionError:
response['percent_total_ava_population'] = 0
try:
response['percent_high_ava_population'] = round((response['high_ava_population']/response['Population'])*100,0)
except ZeroDivisionError:
response['percent_high_ava_population'] = 0
try:
response['percent_med_ava_population'] = round((response['med_ava_population']/response['Population'])*100,0)
except ZeroDivisionError:
response['percent_med_ava_population'] = 0
try:
response['percent_low_ava_population'] = round((response['low_ava_population']/response['Population'])*100,0)
except ZeroDivisionError:
response['percent_low_ava_population'] = 0
try:
response['percent_total_ava_area'] = round((response['total_ava_area']/response['Area'])*100,0)
except ZeroDivisionError:
response['percent_total_ava_area'] = 0
try:
response['percent_high_ava_area'] = round((response['high_ava_area']/response['Area'])*100,0)
except ZeroDivisionError:
response['percent_high_ava_area'] = 0
try:
response['percent_med_ava_area'] = round((response['med_ava_area']/response['Area'])*100,0)
except ZeroDivisionError:
response['percent_med_ava_area'] = 0
try:
response['percent_low_ava_area'] = round((response['low_ava_area']/response['Area'])*100,0)
except ZeroDivisionError:
response['percent_low_ava_area'] = 0
# Population percentage
try:
response['precent_barren_land_pop_risk'] = round((response['barren_land_pop_risk']/response['barren_land_pop'])*100,0)
except ZeroDivisionError:
response['precent_barren_land_pop_risk'] = 0
try:
response['precent_built_up_pop_risk'] = round((response['built_up_pop_risk']/response['built_up_pop'])*100,0)
except ZeroDivisionError:
response['precent_built_up_pop_risk'] = 0
try:
response['precent_fruit_trees_pop_risk'] = round((response['fruit_trees_pop_risk']/response['fruit_trees_pop'])*100,0)
except ZeroDivisionError:
response['precent_fruit_trees_pop_risk'] = 0
try:
response['precent_irrigated_agricultural_land_pop_risk'] = round((response['irrigated_agricultural_land_pop_risk']/response['irrigated_agricultural_land_pop'])*100,0)
except ZeroDivisionError:
response['precent_irrigated_agricultural_land_pop_risk'] = 0
try:
response['precent_permanent_snow_pop_risk'] = round((response['permanent_snow_pop_risk']/response['permanent_snow_pop'])*100,0)
except ZeroDivisionError:
response['precent_permanent_snow_pop_risk'] = 0
try:
response['precent_rainfed_agricultural_land_pop_risk'] = round((response['rainfed_agricultural_land_pop_risk']/response['rainfed_agricultural_land_pop'])*100,0)
except ZeroDivisionError:
response['precent_rainfed_agricultural_land_pop_risk'] = 0
try:
response['precent_rangeland_pop_risk'] = round((response['rangeland_pop_risk']/response['rangeland_pop'])*100,0)
except ZeroDivisionError:
response['precent_rangeland_pop_risk'] = 0
try:
response['precent_sandcover_pop_risk'] = round((response['sandcover_pop_risk']/response['sandcover_pop'])*100,0)
except ZeroDivisionError:
response['precent_sandcover_pop_risk'] = 0
try:
response['precent_vineyards_pop_risk'] = round((response['vineyards_pop_risk']/response['vineyards_pop'])*100,0)
except ZeroDivisionError:
response['precent_vineyards_pop_risk'] = 0
try:
response['precent_water_body_pop_risk'] = round((response['water_body_pop_risk']/response['water_body_pop'])*100,0)
except ZeroDivisionError:
response['precent_water_body_pop_risk'] = 0
try:
response['precent_forest_pop_risk'] = round((response['forest_pop_risk']/response['forest_pop'])*100,0)
except ZeroDivisionError:
response['precent_forest_pop_risk'] = 0
try:
response['precent_sand_dunes_pop_risk'] = round((response['sand_dunes_pop_risk']/response['sand_dunes_pop'])*100,0)
except ZeroDivisionError:
response['precent_sand_dunes_pop_risk'] = 0
# Area percentage
try:
response['precent_barren_land_area_risk'] = round((response['barren_land_area_risk']/response['barren_land_area'])*100,0)
except ZeroDivisionError:
response['precent_barren_land_area_risk'] = 0
try:
response['precent_built_up_area_risk'] = round((response['built_up_area_risk']/response['built_up_area'])*100,0)
except ZeroDivisionError:
response['precent_built_up_area_risk'] = 0
try:
response['precent_fruit_trees_area_risk'] = round((response['fruit_trees_area_risk']/response['fruit_trees_area'])*100,0)
except ZeroDivisionError:
response['precent_fruit_trees_area_risk'] = 0
try:
response['precent_irrigated_agricultural_land_area_risk'] = round((response['irrigated_agricultural_land_area_risk']/response['irrigated_agricultural_land_area'])*100,0)
except ZeroDivisionError:
response['precent_irrigated_agricultural_land_area_risk'] = 0
try:
response['precent_permanent_snow_area_risk'] = round((response['permanent_snow_area_risk']/response['permanent_snow_area'])*100,0)
except ZeroDivisionError:
response['precent_permanent_snow_area_risk'] = 0
try:
response['precent_rainfed_agricultural_land_area_risk'] = round((response['rainfed_agricultural_land_area_risk']/response['rainfed_agricultural_land_area'])*100,0)
except ZeroDivisionError:
response['precent_rainfed_agricultural_land_area_risk'] = 0
try:
response['precent_rangeland_area_risk'] = round((response['rangeland_area_risk']/response['rangeland_area'])*100,0)
except ZeroDivisionError:
response['precent_rangeland_area_risk'] = 0
try:
response['precent_sandcover_area_risk'] = round((response['sandcover_area_risk']/response['sandcover_area'])*100,0)
except ZeroDivisionError:
response['precent_sandcover_area_risk'] = 0
try:
response['precent_vineyards_area_risk'] = round((response['vineyards_area_risk']/response['vineyards_area'])*100,0)
except ZeroDivisionError:
response['precent_vineyards_area_risk'] = 0
try:
response['precent_water_body_area_risk'] = round((response['water_body_area_risk']/response['water_body_area'])*100,0)
except ZeroDivisionError:
response['precent_water_body_area_risk'] = 0
try:
response['precent_forest_area_risk'] = round((response['forest_area_risk']/response['forest_area'])*100,0)
except ZeroDivisionError:
response['precent_forest_area_risk'] = 0
try:
response['precent_sand_dunes_area_risk'] = round((response['sand_dunes_area_risk']/response['sand_dunes_area'])*100,0)
except ZeroDivisionError:
response['precent_sand_dunes_area_risk'] = 0
# Roads
if flag=='drawArea':
countsRoadBase = AfgRdsl.objects.all().values('type_update').annotate(counter=Count('ogc_fid')).extra(
select={
'road_length' : 'SUM( \
case \
when ST_CoveredBy(wkb_geometry'+','+filterLock+') then road_length \
else ST_Length(st_intersection(wkb_geometry::geography'+','+filterLock+')) / road_length end \
)/1000'
},
where = {
'ST_Intersects(wkb_geometry'+', '+filterLock+')'
}).values('type_update','road_length')
countsHLTBase = AfgHltfac.objects.all().filter(activestatus='Y').values('facility_types_description').annotate(counter=Count('ogc_fid')).extra(
select={
'numberhospital' : 'count(*)'
},
where = {
'ST_Intersects(wkb_geometry'+', '+filterLock+')'
}).values('facility_types_description', 'numberhospital')
elif flag=='entireAfg':
countsRoadBase = AfgRdsl.objects.all().values('type_update').annotate(counter=Count('ogc_fid')).extra(
select={
'road_length' : 'SUM(road_length)/1000'
}).values('type_update', 'road_length')
# Health Facilities
countsHLTBase = AfgHltfac.objects.all().filter(activestatus='Y').values('facility_types_description').annotate(counter=Count('ogc_fid')).extra(
select={
'numberhospital' : 'count(*)'
}).values('facility_types_description', 'numberhospital')
elif flag=='currentProvince':
if len(str(code)) > 2:
ff0001 = "dist_code = '"+str(code)+"'"
else :
if len(str(code))==1:
ff0001 = "left(cast(dist_code as text),1) = '"+str(code)+"'"
else:
ff0001 = "left(cast(dist_code as text),2) = '"+str(code)+"'"
countsRoadBase = AfgRdsl.objects.all().values('type_update').annotate(counter=Count('ogc_fid')).extra(
select={
'road_length' : 'SUM(road_length)/1000'
},
where = {
ff0001
}).values('type_update','road_length')
countsHLTBase = AfgHltfac.objects.all().filter(activestatus='Y').values('facility_types_description').annotate(counter=Count('ogc_fid')).extra(
select={
'numberhospital' : 'count(*)'
},where = {
ff0001
}).values('facility_types_description', 'numberhospital')
elif flag=='currentBasin':
# No road / health-facility query is defined for this flag yet; fall back to
# empty result sets so the aggregation below does not raise a NameError.
print 'currentBasin'
countsRoadBase = []
countsHLTBase = []
else:
countsRoadBase = AfgRdsl.objects.all().values('type_update').annotate(counter=Count('ogc_fid')).extra(
select={
'road_length' : 'SUM(road_length)/1000'
},
where = {
'ST_Within(wkb_geometry'+', '+filterLock+')'
}).values('type_update','road_length')
countsHLTBase = AfgHltfac.objects.all().filter(activestatus='Y').values('facility_types_description').annotate(counter=Count('ogc_fid')).extra(
select={
'numberhospital' : 'count(*)'
},where = {
'ST_Within(wkb_geometry'+', '+filterLock+')'
}).values('facility_types_description', 'numberhospital')
tempRoadBase = dict([(c['type_update'], c['road_length']) for c in countsRoadBase])
tempHLTBase = dict([(c['facility_types_description'], c['numberhospital']) for c in countsHLTBase])
response["highway_road_base"]=round(tempRoadBase.get("highway", 0),1)
response["primary_road_base"]=round(tempRoadBase.get("primary", 0),1)
response["secondary_road_base"]=round(tempRoadBase.get("secondary", 0),1)
response["tertiary_road_base"]=round(tempRoadBase.get("tertiary", 0),1)
response["residential_road_base"]=round(tempRoadBase.get("residential", 0),1)
response["track_road_base"]=round(tempRoadBase.get("track", 0),1)
response["path_road_base"]=round(tempRoadBase.get("path", 0),1)
response["river_crossing_road_base"]=round(tempRoadBase.get("river crossing", 0),1)
response["bridge_road_base"]=round(tempRoadBase.get("bridge", 0),1)
response["total_road_base"]=response["highway_road_base"]+response["primary_road_base"]+response["secondary_road_base"]+response["tertiary_road_base"]+response["residential_road_base"]+response["track_road_base"]+response["path_road_base"]+response["river_crossing_road_base"]+response["bridge_road_base"]
response["h1_health_base"]=round(tempHLTBase.get("Regional / National Hospital (H1)", 0))
response["h2_health_base"]=round(tempHLTBase.get("Provincial Hospital (H2)", 0))
response["h3_health_base"]=round(tempHLTBase.get("District Hospital (H3)", 0))
response["sh_health_base"]=round(tempHLTBase.get("Special Hospital (SH)", 0))
response["rh_health_base"]=round(tempHLTBase.get("Rehabilitation Center (RH)", 0))
response["mh_health_base"]=round(tempHLTBase.get("Maternity Home (MH)", 0))
response["datc_health_base"]=round(tempHLTBase.get("Drug Addicted Treatment Center", 0))
response["tbc_health_base"]=round(tempHLTBase.get("TB Control Center (TBC)", 0))
response["mntc_health_base"]=round(tempHLTBase.get("Mental Clinic / Hospital", 0))
response["chc_health_base"]=round(tempHLTBase.get("Comprehensive Health Center (CHC)", 0))
response["bhc_health_base"]=round(tempHLTBase.get("Basic Health Center (BHC)", 0))
response["dcf_health_base"]=round(tempHLTBase.get("Day Care Feeding", 0))
response["mch_health_base"]=round(tempHLTBase.get("MCH Clinic M1 or M2 (MCH)", 0))
response["shc_health_base"]=round(tempHLTBase.get("Sub Health Center (SHC)", 0))
response["ec_health_base"]=round(tempHLTBase.get("Eye Clinic / Hospital", 0))
response["pyc_health_base"]=round(tempHLTBase.get("Physiotherapy Center", 0))
response["pic_health_base"]=round(tempHLTBase.get("Private Clinic", 0))
response["mc_health_base"]=round(tempHLTBase.get("Malaria Center (MC)", 0))
response["moph_health_base"]=round(tempHLTBase.get("MoPH National", 0))
response["epi_health_base"]=round(tempHLTBase.get("EPI Fixed Center (EPI)", 0))
response["sfc_health_base"]=round(tempHLTBase.get("Supplementary Feeding Center (SFC)", 0))
response["mht_health_base"]=round(tempHLTBase.get("Mobile Health Team (MHT)", 0))
response["other_health_base"]=round(tempHLTBase.get("Other", 0))
response["total_health_base"] = response["bhc_health_base"]+response["dcf_health_base"]+response["mch_health_base"]+response["rh_health_base"]+response["h3_health_base"]+response["sh_health_base"]+response["mh_health_base"]+response["datc_health_base"]+response["h1_health_base"]+response["shc_health_base"]+response["ec_health_base"]+response["pyc_health_base"]+response["pic_health_base"]+response["tbc_health_base"]+response["mntc_health_base"]+response["chc_health_base"]+response["other_health_base"]+response["h2_health_base"]+response["mc_health_base"]+response["moph_health_base"]+response["epi_health_base"]+response["sfc_health_base"]+response["mht_health_base"]
sw = forecastedLastUpdate.objects.filter(forecasttype='snowwater').latest('datadate')
rf = forecastedLastUpdate.objects.filter(forecasttype='riverflood').latest('datadate')
# print rf.datadate
tempRF = rf.datadate + datetime.timedelta(hours=4.5)
tempSW = sw.datadate + datetime.timedelta(hours=4.5)
tz = timezone('Asia/Kabul')
stdSC = datetime.datetime.utcnow()
stdSC = stdSC.replace(hour=3, minute=00, second=00)
tempSC = datetime.datetime.utcnow()
if stdSC > tempSC:
tempSC = tempSC - datetime.timedelta(days=1)
tempSC = tempSC.replace(hour=3, minute=00, second=00)
tempSC = tempSC + datetime.timedelta(hours=4.5)
# tempSC = tempSC.replace(tzinfo=tz)
print tempSC, tempRF, tempSW
response["riverflood_lastupdated"] = timeago.format(tempRF, datetime.datetime.utcnow()+ datetime.timedelta(hours=4.5)) #tempRF.strftime("%d-%m-%Y %H:%M")
response["snowwater_lastupdated"] = timeago.format(tempSW, datetime.datetime.utcnow()+ datetime.timedelta(hours=4.5)) #tempSW.strftime("%d-%m-%Y %H:%M")
response["glofas_lastupdated"] = timeago.format(tempSC, datetime.datetime.utcnow()+ datetime.timedelta(hours=4.5)) #tempSC.strftime("%d-%m-%Y %H:%M")
return response
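# The percentage fields above repeat the same round((part/whole)*100, 0) expression
# guarded by ZeroDivisionError many times. The helper below is a minimal sketch of
# that pattern (hypothetical name, not wired into the code above); it mirrors the
# original expression and only assumes the two numeric inputs.
def safe_percent(part, whole):
    """Return part/whole as a whole-number percentage, or 0 when whole is 0."""
    try:
        return round((part / whole) * 100, 0)
    except ZeroDivisionError:
        return 0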
def getRisk(request):
# saving the user tracking records
o = urlparse(request.META.get('HTTP_REFERER')).path
o=o.split('/')
mapCode = o[2]
map_obj = _resolve_map(request, mapCode, 'base.view_resourcebase', _PERMISSION_MSG_VIEW)
queryset = matrix(user=request.user,resourceid=map_obj,action='Interactive Calculation')
queryset.save()
boundaryFilter = json.loads(request.body)
temp1 = []
for i in boundaryFilter['spatialfilter']:
temp1.append('ST_GeomFromText(\''+i+'\',4326)')
temp2 = 'ARRAY['
first=True
for i in temp1:
if first:
temp2 = temp2 + i
first=False
else :
temp2 = temp2 + ', ' + i
temp2 = temp2+']'
filterLock = 'ST_Union('+temp2+')'
# getRisk is a module-level function, so there is no `self` here; assuming a
# module-level getRiskExecute() helper is available in this module.
response = getRiskExecute(filterLock)
return response
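# Several handlers below (getEarthQuakeStats, getAccessibilities.getData,
# getLandslide.getData) rebuild the same ST_Union(ARRAY[...]) expression from the
# request's 'spatialfilter' WKT strings. A minimal sketch of that construction as a
# reusable helper (hypothetical name, not called by the existing code):
def build_filter_lock(spatialfilter):
    # Wrap each WKT string in ST_GeomFromText and union them into one geometry.
    geoms = ["ST_GeomFromText('" + wkt + "',4326)" for wkt in spatialfilter]
    return 'ST_Union(ARRAY[' + ', '.join(geoms) + '])'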
def getRiskNumber(data, filterLock, fieldGroup, popField, areaField, houseField, aflag, acode, atablename):
if atablename == None:
atablename = ''
else:
atablename = atablename+'.'
if aflag=='drawArea':
counts = list(data.values(fieldGroup).annotate(counter=Count('ogc_fid')).extra(
select={
'count' : 'SUM( \
case \
when ST_CoveredBy('+atablename+'wkb_geometry'+','+filterLock+') then '+popField+' \
else st_area(st_intersection('+atablename+'wkb_geometry'+','+filterLock+')) / st_area('+atablename+'wkb_geometry'+')*'+popField+' end \
)',
'areaatrisk' : 'SUM( \
case \
when ST_CoveredBy('+atablename+'wkb_geometry'+','+filterLock+') then '+areaField+' \
else st_area(st_intersection('+atablename+'wkb_geometry'+','+filterLock+')) / st_area('+atablename+'wkb_geometry'+')*'+areaField+' end \
)',
'houseatrisk' : 'SUM( \
case \
when ST_CoveredBy('+atablename+'wkb_geometry'+','+filterLock+') then '+houseField+' \
else st_area(st_intersection('+atablename+'wkb_geometry'+','+filterLock+')) / st_area('+atablename+'wkb_geometry'+')*'+houseField+' end \
)'
},
where = {
'ST_Intersects('+atablename+'wkb_geometry'+', '+filterLock+')'
}).values(fieldGroup,'count','areaatrisk','houseatrisk'))
elif aflag=='entireAfg':
counts = list(data.values(fieldGroup).annotate(counter=Count('ogc_fid')).extra(
select={
'count' : 'SUM('+popField+')',
'areaatrisk' : 'SUM('+areaField+')',
'houseatrisk' : 'SUM('+houseField+')'
}).values(fieldGroup,'count','areaatrisk','houseatrisk'))
elif aflag=='currentProvince':
# print "left(dist_code), "+str(len(str(acode)))+") = '"+str(acode)+"'"
# print "left(dist_code, "+len(str(acode))+") = '"+str(acode)+"'"
if len(str(acode)) > 2:
ff0001 = "dist_code = '"+str(acode)+"'"
else :
ff0001 = "prov_code = '"+str(acode)+"'"
counts = list(data.values(fieldGroup).annotate(counter=Count('ogc_fid')).extra(
select={
'count' : 'SUM('+popField+')',
'areaatrisk' : 'SUM('+areaField+')',
'houseatrisk' : 'SUM('+houseField+')'
},
where = {
ff0001
}).values(fieldGroup,'count','areaatrisk','houseatrisk'))
elif aflag=='currentBasin':
counts = list(data.values(fieldGroup).annotate(counter=Count('ogc_fid')).extra(
select={
'count' : 'SUM('+popField+')',
'areaatrisk' : 'SUM('+areaField+')',
'houseatrisk' : 'SUM('+houseField+')'
},
where = {
atablename+"vuid = '"+str(acode)+"'"
}).values(fieldGroup,'count','areaatrisk','houseatrisk'))
else:
counts = list(data.values(fieldGroup).annotate(counter=Count('ogc_fid')).extra(
select={
'count' : 'SUM('+popField+')',
'areaatrisk' : 'SUM('+areaField+')',
'houseatrisk' : 'SUM('+houseField+')'
},
where = {
'ST_Within('+atablename+'wkb_geometry'+', '+filterLock+')'
}).values(fieldGroup,'count','areaatrisk','houseatrisk'))
return counts
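# Illustrative call shape for getRiskNumber (placeholder names only, not actual
# models or fields from this project):
#   counts = getRiskNumber(SomeRiskModel.objects.all(), filterLock,
#                          'grouping_field', 'population_field', 'area_field',
#                          'house_field', flag, code, None)
# Each returned row carries the grouping field plus the 'count', 'areaatrisk' and
# 'houseatrisk' sums for the selected boundary.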
class getProvince(ModelResource):
"""Provinces api"""
class Meta:
queryset = AfgAdmbndaAdm1.objects.all().defer('wkb_geometry')
resource_name = 'getprovince'
allowed_methods = ['get']
filtering = { "id" : ALL }
class EarthQuakeStatisticResource(ModelResource):
"""Flood api"""
class Meta:
authorization = DjangoAuthorization()
resource_name = 'earthquakestat'
allowed_methods = ['post']
detail_allowed_methods = ['post']
always_return_data = True
def post_list(self, request, **kwargs):
self.method_check(request, allowed=['post'])
response = self.getEarthQuakeStats(request)
return self.create_response(request, response)
def getEarthQuakeStats(self, request):
# o = urlparse(request.META.get('HTTP_REFERER')).path
# o=o.split('/')
# mapCode = o[2]
# map_obj = _resolve_map(request, mapCode, 'base.view_resourcebase', _PERMISSION_MSG_VIEW)
# queryset = matrix(user=request.user,resourceid=map_obj,action='Interactive Calculation')
# queryset.save()
boundaryFilter = json.loads(request.body)
flag = boundaryFilter['flag']
temp1 = []
for i in boundaryFilter['spatialfilter']:
temp1.append('ST_GeomFromText(\''+i+'\',4326)')
temp2 = 'ARRAY['
first=True
for i in temp1:
if first:
temp2 = temp2 + i
first=False
else :
temp2 = temp2 + ', ' + i
temp2 = temp2+']'
filterLock = 'ST_Union('+temp2+')'
# villagesummaryEQ earthquake_shakemap
p = earthquake_shakemap.objects.all().filter(event_code=boundaryFilter['event_code'])
if p.count() == 0:
return {'message':'Mercalli Intensity Scale is not available'}
# Book.objects.all().aggregate(Avg('price'))
# response = getEarthQuakeExecuteExternal(filterLock,boundaryFilter['flag'],boundaryFilter['code'])
if flag=='drawArea':
cursor = connections['geodb'].cursor()
cursor.execute("\
select coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.pop_shake_weak \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.pop_shake_weak \
end \
)),0) as pop_shake_weak, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.pop_shake_light \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.pop_shake_light \
end \
)),0) as pop_shake_light, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.pop_shake_moderate \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.pop_shake_moderate \
end \
)),0) as pop_shake_moderate, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.pop_shake_strong \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.pop_shake_strong \
end \
)),0) as pop_shake_strong, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.pop_shake_verystrong \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.pop_shake_verystrong \
end \
)),0) as pop_shake_verystrong, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.pop_shake_severe \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.pop_shake_severe \
end \
)),0) as pop_shake_severe, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.pop_shake_violent \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.pop_shake_violent \
end \
)),0) as pop_shake_violent, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.pop_shake_extreme \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.pop_shake_extreme \
end \
)),0) as pop_shake_extreme, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.settlement_shake_weak \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.settlement_shake_weak \
end \
)),0) as settlement_shake_weak, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.settlement_shake_light \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.settlement_shake_light \
end \
)),0) as settlement_shake_light, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.settlement_shake_moderate \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.settlement_shake_moderate \
end \
)),0) as settlement_shake_moderate, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.settlement_shake_strong \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.settlement_shake_strong \
end \
)),0) as settlement_shake_strong, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.settlement_shake_verystrong \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.settlement_shake_verystrong \
end \
)),0) as settlement_shake_verystrong, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.settlement_shake_severe \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.settlement_shake_severe \
end \
)),0) as settlement_shake_severe, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.settlement_shake_violent \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.settlement_shake_violent \
end \
)),0) as settlement_shake_violent, \
coalesce(round(sum( \
case \
when ST_CoveredBy(a.wkb_geometry,"+filterLock+") then b.settlement_shake_extreme \
else st_area(st_intersection(a.wkb_geometry,"+filterLock+"))/st_area(a.wkb_geometry)*b.settlement_shake_extreme \
end \
)),0) as settlement_shake_extreme, \
coalesce(round(sum(b.buildings_shake_weak)),0) as buildings_shake_weak, \
coalesce(round(sum(b.buildings_shake_light)),0) as buildings_shake_light, \
coalesce(round(sum(b.buildings_shake_moderate)),0) as buildings_shake_moderate, \
coalesce(round(sum(b.buildings_shake_strong)),0) as buildings_shake_strong, \
coalesce(round(sum(b.buildings_shake_verystrong)),0) as buildings_shake_verystrong, \
coalesce(round(sum(b.buildings_shake_severe)),0) as buildings_shake_severe, \
coalesce(round(sum(b.buildings_shake_violent)),0) as buildings_shake_violent, \
coalesce(round(sum(b.buildings_shake_extreme)),0) as buildings_shake_extreme \
from afg_ppla a, villagesummary_eq b \
where a.vuid = b.village and b.event_code = '"+boundaryFilter['event_code']+"' \
and ST_Intersects(a.wkb_geometry,"+filterLock+") \
")
col_names = [desc[0] for desc in cursor.description]
row = cursor.fetchone()
row_dict = dict(izip(col_names, row))
cursor.close()
counts={}
counts[0] = row_dict
elif flag=='entireAfg':
counts = list(villagesummaryEQ.objects.all().extra(
select={
'pop_shake_weak' : 'coalesce(SUM(pop_shake_weak),0)',
'pop_shake_light' : 'coalesce(SUM(pop_shake_light),0)',
'pop_shake_moderate' : 'coalesce(SUM(pop_shake_moderate),0)',
'pop_shake_strong' : 'coalesce(SUM(pop_shake_strong),0)',
'pop_shake_verystrong' : 'coalesce(SUM(pop_shake_verystrong),0)',
'pop_shake_severe' : 'coalesce(SUM(pop_shake_severe),0)',
'pop_shake_violent' : 'coalesce(SUM(pop_shake_violent),0)',
'pop_shake_extreme' : 'coalesce(SUM(pop_shake_extreme),0)',
'settlement_shake_weak' : 'coalesce(SUM(settlement_shake_weak),0)',
'settlement_shake_light' : 'coalesce(SUM(settlement_shake_light),0)',
'settlement_shake_moderate' : 'coalesce(SUM(settlement_shake_moderate),0)',
'settlement_shake_strong' : 'coalesce(SUM(settlement_shake_strong),0)',
'settlement_shake_verystrong' : 'coalesce(SUM(settlement_shake_verystrong),0)',
'settlement_shake_severe' : 'coalesce(SUM(settlement_shake_severe),0)',
'settlement_shake_violent' : 'coalesce(SUM(settlement_shake_violent),0)',
'settlement_shake_extreme' : 'coalesce(SUM(settlement_shake_extreme),0)',
'buildings_shake_weak' : 'coalesce(SUM(buildings_shake_weak),0)',
'buildings_shake_light' : 'coalesce(SUM(buildings_shake_light),0)',
'buildings_shake_moderate' : 'coalesce(SUM(buildings_shake_moderate),0)',
'buildings_shake_strong' : 'coalesce(SUM(buildings_shake_strong),0)',
'buildings_shake_verystrong' : 'coalesce(SUM(buildings_shake_verystrong),0)',
'buildings_shake_severe' : 'coalesce(SUM(buildings_shake_severe),0)',
'buildings_shake_violent' : 'coalesce(SUM(buildings_shake_violent),0)',
'buildings_shake_extreme' : 'coalesce(SUM(buildings_shake_extreme),0)'
},
where = {
"event_code = '"+boundaryFilter['event_code']+"'"
}).values(
'pop_shake_weak',
'pop_shake_light',
'pop_shake_moderate',
'pop_shake_strong',
'pop_shake_verystrong',
'pop_shake_severe',
'pop_shake_violent',
'pop_shake_extreme',
'settlement_shake_weak',
'settlement_shake_light',
'settlement_shake_moderate',
'settlement_shake_strong',
'settlement_shake_verystrong',
'settlement_shake_severe',
'settlement_shake_violent',
'settlement_shake_extreme',
'buildings_shake_weak',
'buildings_shake_light',
'buildings_shake_moderate',
'buildings_shake_strong',
'buildings_shake_verystrong',
'buildings_shake_severe',
'buildings_shake_violent',
'buildings_shake_extreme'
))
elif flag =='currentProvince':
if len(str(boundaryFilter['code'])) > 2:
ff0001 = "district = '"+str(boundaryFilter['code'])+"'"
else :
ff0001 = "left(cast(district as text), "+str(len(str(boundaryFilter['code'])))+") = '"+str(boundaryFilter['code'])+"' and length(cast(district as text))="+ str(len(str(boundaryFilter['code']))+2)
counts = list(villagesummaryEQ.objects.all().extra(
select={
'pop_shake_weak' : 'coalesce(SUM(pop_shake_weak),0)',
'pop_shake_light' : 'coalesce(SUM(pop_shake_light),0)',
'pop_shake_moderate' : 'coalesce(SUM(pop_shake_moderate),0)',
'pop_shake_strong' : 'coalesce(SUM(pop_shake_strong),0)',
'pop_shake_verystrong' : 'coalesce(SUM(pop_shake_verystrong),0)',
'pop_shake_severe' : 'coalesce(SUM(pop_shake_severe),0)',
'pop_shake_violent' : 'coalesce(SUM(pop_shake_violent),0)',
'pop_shake_extreme' : 'coalesce(SUM(pop_shake_extreme),0)',
'settlement_shake_weak' : 'coalesce(SUM(settlement_shake_weak),0)',
'settlement_shake_light' : 'coalesce(SUM(settlement_shake_light),0)',
'settlement_shake_moderate' : 'coalesce(SUM(settlement_shake_moderate),0)',
'settlement_shake_strong' : 'coalesce(SUM(settlement_shake_strong),0)',
'settlement_shake_verystrong' : 'coalesce(SUM(settlement_shake_verystrong),0)',
'settlement_shake_severe' : 'coalesce(SUM(settlement_shake_severe),0)',
'settlement_shake_violent' : 'coalesce(SUM(settlement_shake_violent),0)',
'settlement_shake_extreme' : 'coalesce(SUM(settlement_shake_extreme),0)',
'buildings_shake_weak' : 'coalesce(SUM(buildings_shake_weak),0)',
'buildings_shake_light' : 'coalesce(SUM(buildings_shake_light),0)',
'buildings_shake_moderate' : 'coalesce(SUM(buildings_shake_moderate),0)',
'buildings_shake_strong' : 'coalesce(SUM(buildings_shake_strong),0)',
'buildings_shake_verystrong' : 'coalesce(SUM(buildings_shake_verystrong),0)',
'buildings_shake_severe' : 'coalesce(SUM(buildings_shake_severe),0)',
'buildings_shake_violent' : 'coalesce(SUM(buildings_shake_violent),0)',
'buildings_shake_extreme' : 'coalesce(SUM(buildings_shake_extreme),0)'
},
where = {
"event_code = '"+boundaryFilter['event_code']+"' and "+ff0001
}).values(
'pop_shake_weak',
'pop_shake_light',
'pop_shake_moderate',
'pop_shake_strong',
'pop_shake_verystrong',
'pop_shake_severe',
'pop_shake_violent',
'pop_shake_extreme',
'settlement_shake_weak',
'settlement_shake_light',
'settlement_shake_moderate',
'settlement_shake_strong',
'settlement_shake_verystrong',
'settlement_shake_severe',
'settlement_shake_violent',
'settlement_shake_extreme',
'buildings_shake_weak',
'buildings_shake_light',
'buildings_shake_moderate',
'buildings_shake_strong',
'buildings_shake_verystrong',
'buildings_shake_severe',
'buildings_shake_violent',
'buildings_shake_extreme'
))
else:
cursor = connections['geodb'].cursor()
cursor.execute("\
select coalesce(round(sum(b.pop_shake_weak)),0) as pop_shake_weak, \
coalesce(round(sum(b.pop_shake_light)),0) as pop_shake_light, \
coalesce(round(sum(b.pop_shake_moderate)),0) as pop_shake_moderate, \
coalesce(round(sum(b.pop_shake_strong)),0) as pop_shake_strong, \
coalesce(round(sum(b.pop_shake_verystrong)),0) as pop_shake_verystrong, \
coalesce(round(sum(b.pop_shake_severe)),0) as pop_shake_severe, \
coalesce(round(sum(b.pop_shake_violent)),0) as pop_shake_violent, \
coalesce(round(sum(b.pop_shake_extreme)),0) as pop_shake_extreme, \
coalesce(round(sum(b.settlement_shake_weak)),0) as settlement_shake_weak, \
coalesce(round(sum(b.settlement_shake_light)),0) as settlement_shake_light, \
coalesce(round(sum(b.settlement_shake_moderate)),0) as settlement_shake_moderate, \
coalesce(round(sum(b.settlement_shake_strong)),0) as settlement_shake_strong, \
coalesce(round(sum(b.settlement_shake_verystrong)),0) as settlement_shake_verystrong, \
coalesce(round(sum(b.settlement_shake_severe)),0) as settlement_shake_severe, \
coalesce(round(sum(b.settlement_shake_violent)),0) as settlement_shake_violent, \
coalesce(round(sum(b.settlement_shake_extreme)),0) as settlement_shake_extreme, \
coalesce(round(sum(b.buildings_shake_weak)),0) as buildings_shake_weak, \
coalesce(round(sum(b.buildings_shake_light)),0) as buildings_shake_light, \
coalesce(round(sum(b.buildings_shake_moderate)),0) as buildings_shake_moderate, \
coalesce(round(sum(b.buildings_shake_strong)),0) as buildings_shake_strong, \
coalesce(round(sum(b.buildings_shake_verystrong)),0) as buildings_shake_verystrong, \
coalesce(round(sum(b.buildings_shake_severe)),0) as buildings_shake_severe, \
coalesce(round(sum(b.buildings_shake_violent)),0) as buildings_shake_violent, \
coalesce(round(sum(b.buildings_shake_extreme)),0) as buildings_shake_extreme \
from afg_ppla a, villagesummary_eq b \
where a.vuid = b.village and b.event_code = '"+boundaryFilter['event_code']+"' \
and ST_Within(a.wkb_geometry,"+filterLock+") \
")
col_names = [desc[0] for desc in cursor.description]
row = cursor.fetchone()
row_dict = dict(izip(col_names, row))
cursor.close()
counts={}
counts[0] = row_dict
return counts[0]
def getEarthQuakeExecuteExternal(filterLock, flag, code, event_code):
response = {}
cursor = connections['geodb'].cursor()
cursor.execute("\
select b.grid_value, sum( \
case \
when ST_CoveredBy(a.wkb_geometry,b.wkb_geometry) then a.area_population \
else st_area(st_intersection(a.wkb_geometry,b.wkb_geometry))/st_area(a.wkb_geometry)*a.area_population \
end) as pop \
from afg_lndcrva a, earthquake_shakemap b \
where b.event_code = '"+event_code+"' and b.grid_value > 1 and a.vuid = '"+str(code)+"' \
and ST_Intersects(a.wkb_geometry,b.wkb_geometry) \
group by b.grid_value\
")
# cursor.execute("\
# select b.grid_value, sum( \
# case \
# when ST_CoveredBy(a.wkb_geometry,b.wkb_geometry) then a.vuid_population_landscan \
# else st_area(st_intersection(a.wkb_geometry,b.wkb_geometry))/st_area(a.wkb_geometry)*a.vuid_population_landscan \
# end) as pop \
# from afg_ppla a, earthquake_shakemap b \
# where b.event_code = '"+event_code+"' and b.grid_value > 1 and a.vuid = '"+str(code)+"' \
# and ST_Intersects(a.wkb_geometry,b.wkb_geometry) \
# group by b.grid_value\
# ")
row = cursor.fetchall()
temp = dict([(c[0], c[1]) for c in row])
response['pop_shake_weak']=round(temp.get(2, 0),0) + round(temp.get(3, 0),0)
response['pop_shake_light']=round(temp.get(4, 0),0)
response['pop_shake_moderate']=round(temp.get(5, 0),0)
response['pop_shake_strong']=round(temp.get(6, 0),0)
response['pop_shake_verystrong']=round(temp.get(7, 0),0)
response['pop_shake_severe']=round(temp.get(8, 0),0)
response['pop_shake_violent']=round(temp.get(9, 0),0)
response['pop_shake_extreme']=round(temp.get(10, 0),0)+round(temp.get(11, 0),0)+round(temp.get(12, 0),0)+round(temp.get(13, 0),0)+round(temp.get(14, 0),0)+round(temp.get(15, 0),0)
cursor.execute("\
select b.grid_value, count(*) as numbersettlements \
from afg_pplp a, earthquake_shakemap b \
where b.event_code = '"+event_code+"' and b.grid_value > 1 and a.vuid = '"+str(code)+"' \
and ST_Within(a.wkb_geometry,b.wkb_geometry) \
group by b.grid_value\
")
row = cursor.fetchall()
temp = dict([(c[0], c[1]) for c in row])
response['settlement_shake_weak']=round(temp.get(2, 0),0) + round(temp.get(3, 0),0)
response['settlement_shake_light']=round(temp.get(4, 0),0)
response['settlement_shake_moderate']=round(temp.get(5, 0),0)
response['settlement_shake_strong']=round(temp.get(6, 0),0)
response['settlement_shake_verystrong']=round(temp.get(7, 0),0)
response['settlement_shake_severe']=round(temp.get(8, 0),0)
response['settlement_shake_violent']=round(temp.get(9, 0),0)
response['settlement_shake_extreme']=round(temp.get(10, 0),0)+round(temp.get(11, 0),0)+round(temp.get(12, 0),0)+round(temp.get(13, 0),0)+round(temp.get(14, 0),0)+round(temp.get(15, 0),0)
cursor.close()
return response
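# The two blocks above translate shakemap grid_value buckets into Mercalli-style
# classes: 2-3 weak, 4 light, 5 moderate, 6 strong, 7 very strong, 8 severe,
# 9 violent and 10-15 extreme. A sketch of that mapping as data (illustrative only,
# not used by getEarthQuakeExecuteExternal):
SHAKE_CLASS_BY_GRID_VALUE = {
    2: 'weak', 3: 'weak', 4: 'light', 5: 'moderate', 6: 'strong',
    7: 'verystrong', 8: 'severe', 9: 'violent',
}  # grid values 10-15 are all counted as 'extreme'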
class EQEventsSerializer(Serializer):
def to_json(self, data, options=None):
options = options or {}
data = self.to_simple(data, options)
data2 = self.to_simple({'objects':[]}, options)
for i in data['objects']:
i['sm_available'] = 'ShakeMap is Available'
data2['objects'].append(i)
return json.dumps(data2, cls=DjangoJSONEncoder, sort_keys=True)
class getEQEvents(ModelResource):
"""Provinces api"""
detail_title = fields.CharField()
date_custom = fields.CharField()
evFlag = fields.IntegerField()
smFlag = fields.IntegerField()
sm_available = fields.CharField()
def dehydrate_detail_title(self, bundle):
return bundle.obj.title + ' on ' + bundle.obj.dateofevent.strftime("%d-%m-%Y %H:%M:%S")
def dehydrate_date_custom(self, bundle):
return bundle.obj.dateofevent.strftime("%d-%m-%Y %H:%M:%S")
# def dehydrate_evFlag(self, bundle):
# pEV = earthquake_events.objects.extra(
# tables={'afg_admbnda_adm1'},
# where={"ST_Intersects(afg_admbnda_adm1.wkb_geometry,earthquake_events.wkb_geometry) and earthquake_events.event_code = '"+bundle.obj.event_code+"'"}
# )
# if pEV.count()>0:
# return 1
# else:
# return 0
# def dehydrate_smFlag(self, bundle):
# pSM = earthquake_shakemap.objects.extra(
# tables={'afg_admbnda_adm1'},
# where={"ST_Intersects(afg_admbnda_adm1.wkb_geometry,earthquake_shakemap.wkb_geometry) and earthquake_shakemap.event_code = '"+bundle.obj.event_code+"'"}
# )
# if pSM.count()>0:
# return 1
# else:
# return 0
class Meta:
queryset = earthquake_events.objects.all().exclude(shakemaptimestamp__isnull=True).order_by('dateofevent')
# queryset = earthquake_events.objects.extra(
# tables={'earthquake_shakemap'},
# where={'earthquake_events.event_code=earthquake_shakemap.event_code'
# # 'logsource_domain="example.com"',
# }
# ).values('event_code','title','dateofevent','magnitude','depth', 'shakemaptimestamp','wkb_geometry')
resource_name = 'geteqevents'
allowed_methods = ['get']
filtering = {
"dateofevent" : ['gte', 'lte']
}
serializer = EQEventsSerializer()
class getAccessibilities(ModelResource):
class Meta:
resource_name = 'getaccessibilities'
allowed_methods = ['post']
detail_allowed_methods = ['post']
cache = SimpleCache()
def post_list(self, request, **kwargs):
self.method_check(request, allowed=['post'])
response = self.getData(request)
return self.create_response(request, response)
def getData(self, request):
# AfgCaptAdm1ItsProvcImmap, AfgCaptAdm1NearestProvcImmap, AfgCaptAdm2NearestDistrictcImmap, AfgCaptAirdrmImmap, AfgCaptHltfacTier1Immap, AfgCaptHltfacTier2Immap
# px = provincesummary.objects.aggregate(Sum('high_ava_population')
boundaryFilter = json.loads(request.body)
temp1 = []
for i in boundaryFilter['spatialfilter']:
temp1.append('ST_GeomFromText(\''+i+'\',4326)')
temp2 = 'ARRAY['
first=True
for i in temp1:
if first:
temp2 = temp2 + i
first=False
else :
temp2 = temp2 + ', ' + i
temp2 = temp2+']'
filterLock = 'ST_Union('+temp2+')'
flag = boundaryFilter['flag']
code = boundaryFilter['code']
response = {}
if flag=='entireAfg':
q1 = AfgCaptAdm1ItsProvcImmap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q2 = AfgCaptAdm1NearestProvcImmap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q3 = AfgCaptAdm2NearestDistrictcImmap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q4 = AfgCaptAirdrmImmap.objects.all().values('time').annotate(pop=Sum('sum_area_population'))
q5 = AfgCaptHltfacTier1Immap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q6 = AfgCaptHltfacTier2Immap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q7 = AfgCaptHltfacTier3Immap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q8 = AfgCaptHltfacTierallImmap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
gsm = AfgCapaGsmcvr.objects.all().aggregate(pop=Sum('gsm_coverage_population'),area=Sum('gsm_coverage_area_sqm'),buildings=Sum('area_buildings'))
elif flag =='currentProvince':
if len(str(boundaryFilter['code'])) > 2:
ff0001 = "dist_code = '"+str(boundaryFilter['code'])+"'"
else :
ff0001 = "left(cast(dist_code as text), "+str(len(str(boundaryFilter['code'])))+") = '"+str(boundaryFilter['code'])+"' and length(cast(dist_code as text))="+ str(len(str(boundaryFilter['code']))+2)
q1 = AfgCaptAdm1ItsProvcImmap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings')).extra(
where = {
ff0001
})
q2 = AfgCaptAdm1NearestProvcImmap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings')).extra(
where = {
ff0001
})
q3 = AfgCaptAdm2NearestDistrictcImmap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings')).extra(
where = {
ff0001
})
q4 = AfgCaptAirdrmImmap.objects.all().values('time').annotate(pop=Sum('sum_area_population')).extra(
where = {
ff0001
})
q5 = AfgCaptHltfacTier1Immap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings')).extra(
where = {
ff0001
})
q6 = AfgCaptHltfacTier2Immap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings')).extra(
where = {
ff0001
})
q7 = AfgCaptHltfacTier3Immap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings')).extra(
where = {
ff0001
})
q8 = AfgCaptHltfacTierallImmap.objects.all().values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings')).extra(
where = {
ff0001
})
if len(str(boundaryFilter['code'])) > 2:
gsm = AfgCapaGsmcvr.objects.filter(dist_code=boundaryFilter['code']).aggregate(pop=Sum('gsm_coverage_population'),area=Sum('gsm_coverage_area_sqm'),buildings=Sum('area_buildings'))
else :
gsm = AfgCapaGsmcvr.objects.filter(prov_code=boundaryFilter['code']).aggregate(pop=Sum('gsm_coverage_population'),area=Sum('gsm_coverage_area_sqm'),buildings=Sum('area_buildings'))
elif flag =='drawArea':
tt = AfgPplp.objects.filter(wkb_geometry__intersects=boundaryFilter['spatialfilter'][0]).values('vuid')
q1 = AfgCaptAdm1ItsProvcImmap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q2 = AfgCaptAdm1NearestProvcImmap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q3 = AfgCaptAdm2NearestDistrictcImmap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q4 = AfgCaptAirdrmImmap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'))
q5 = AfgCaptHltfacTier1Immap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q6 = AfgCaptHltfacTier2Immap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q7 = AfgCaptHltfacTier3Immap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q8 = AfgCaptHltfacTierallImmap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
gsm = AfgCapaGsmcvr.objects.filter(vuid__in=tt).aggregate(pop=Sum('gsm_coverage_population'),area=Sum('gsm_coverage_area_sqm'),buildings=Sum('area_buildings'))
else:
tt = AfgPplp.objects.filter(wkb_geometry__intersects=boundaryFilter['spatialfilter'][0]).values('vuid')
q1 = AfgCaptAdm1ItsProvcImmap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q2 = AfgCaptAdm1NearestProvcImmap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q3 = AfgCaptAdm2NearestDistrictcImmap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q4 = AfgCaptAirdrmImmap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'))
q5 = AfgCaptHltfacTier1Immap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q6 = AfgCaptHltfacTier2Immap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q7 = AfgCaptHltfacTier3Immap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
q8 = AfgCaptHltfacTierallImmap.objects.filter(vuid__in=tt).values('time').annotate(pop=Sum('sum_area_population'),buildings=Sum('area_buildings'))
gsm = AfgCapaGsmcvr.objects.filter(vuid__in=tt).aggregate(pop=Sum('gsm_coverage_population'),area=Sum('gsm_coverage_area_sqm'),buildings=Sum('area_buildings'))
for i in q1:
timelabel = i['time'].replace(' ','_')
timelabel = timelabel.replace('<','l')
timelabel = timelabel.replace('>','g')
response[timelabel+'__itsx_prov']=round(i['pop'] or 0)
response[timelabel+'__itsx_prov_building']=round(i['buildings'] or 0)
for i in q2:
timelabel = i['time'].replace(' ','_')
timelabel = timelabel.replace('<','l')
timelabel = timelabel.replace('>','g')
response[timelabel+'__near_prov']=round(i['pop'] or 0)
response[timelabel+'__near_prov_building']=round(i['buildings'] or 0)
for i in q3:
timelabel = i['time'].replace(' ','_')
timelabel = timelabel.replace('<','l')
timelabel = timelabel.replace('>','g')
response[timelabel+'__near_dist']=round(i['pop'] or 0)
response[timelabel+'__near_dist_building']=round(i['buildings'] or 0)
for i in q4:
timelabel = i['time'].replace(' ','_')
timelabel = timelabel.replace('<','l')
timelabel = timelabel.replace('>','g')
response[timelabel+'__near_airp']=round(i['pop'] or 0)
for i in q5:
timelabel = i['time'].replace(' ','_')
timelabel = timelabel.replace('<','l')
timelabel = timelabel.replace('>','g')
response[timelabel+'__near_hlt1']=round(i['pop'] or 0)
response[timelabel+'__near_hlt1_building']=round(i['buildings'] or 0)
for i in q6:
timelabel = i['time'].replace(' ','_')
timelabel = timelabel.replace('<','l')
timelabel = timelabel.replace('>','g')
response[timelabel+'__near_hlt2']=round(i['pop'] or 0)
response[timelabel+'__near_hlt2_building']=round(i['buildings'] or 0)
for i in q7:
timelabel = i['time'].replace(' ','_')
timelabel = timelabel.replace('<','l')
timelabel = timelabel.replace('>','g')
response[timelabel+'__near_hlt3']=round(i['pop'] or 0)
response[timelabel+'__near_hlt3_building']=round(i['buildings'] or 0)
for i in q8:
timelabel = i['time'].replace(' ','_')
timelabel = timelabel.replace('<','l')
timelabel = timelabel.replace('>','g')
response[timelabel+'__near_hltall']=round(i['pop'] or 0)
response[timelabel+'__near_hltall_building']=round(i['buildings'] or 0)
response['pop_on_gsm_coverage'] = round((gsm['pop'] or 0),0)
response['area_on_gsm_coverage'] = round((gsm['area'] or 0)/1000000,0)
response['buildings_on_gsm_coverage'] = gsm['buildings'] or 0
return response
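# getData above repeats the same three .replace() calls on each 'time' value so it
# can be used as a response key suffix. A minimal sketch of that clean-up as a
# helper (hypothetical name, not referenced by the code above):
def time_bucket_label(raw_time):
    # spaces -> underscores, '<' -> 'l', '>' -> 'g', mirroring the loops above
    return raw_time.replace(' ', '_').replace('<', 'l').replace('>', 'g')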
# continuation of the clone from api.py
class getSAMParameters(ModelResource):
"""inicidents type and target api"""
class Meta:
authorization = DjangoAuthorization()
resource_name = 'sam_params'
allowed_methods = ['post']
detail_allowed_methods = ['post']
always_return_data = True
def post_list(self, request, **kwargs):
self.method_check(request, allowed=['post'])
response = self.getStats(request)
return self.create_response(request, response)
def getStats(self, request):
# print str(request.POST['query_type']) #.strip('[]')
# print request.POST
query_filter_group = []
temp_group = dict(request.POST)['query_type']
filterLock = dict(request.POST)['filterlock']
# print filterLock
response = {}
response['objects'] = []
resource = AfgIncidentOasis.objects.all()
if filterLock[0]!='':
resource = resource.filter(wkb_geometry__intersects=filterLock[0])
if len(temp_group)==1:
resource = resource.filter(incident_date__gte=request.POST['start_date'],incident_date__lte=request.POST['end_date'])
resource = resource.values(temp_group[0]).annotate(count=Count('uid'), affected=Sum('affected'), injured=Sum('injured'), violent=Sum('violent'), dead=Sum('dead')).order_by(temp_group[0])
elif len(temp_group)==2:
stat_type_filter = dict(request.POST)['incident_type']
stat_target_filter = dict(request.POST)['incident_target']
resource = resource.filter(incident_date__gte=request.POST['start_date'],incident_date__lte=request.POST['end_date'])
if stat_type_filter[0] != '':
resource = resource.filter(main_type__in=stat_type_filter)
if stat_target_filter[0] != '':
resource = resource.filter(main_target__in=stat_target_filter)
resourceAgregate = resource
resource = resource.values(temp_group[0],temp_group[1]).annotate(count=Count('uid'), affected=Sum('affected'), injured=Sum('injured'), violent=Sum('violent'), dead=Sum('dead')).order_by(temp_group[0],temp_group[1])
resourceAgregate = resourceAgregate.aggregate(count=Count('uid'), affected=Sum('affected'), injured=Sum('injured'), violent=Sum('violent'), dead=Sum('dead'))
response['total_incident'] = resourceAgregate['count']
response['total_injured'] = resourceAgregate['injured']
response['total_violent'] = resourceAgregate['violent']
response['total_dead'] = resourceAgregate['dead']
for i in resource:
i['visible']=True
response['objects'].append(i)
# response['objects'] = resource
response['total_count'] = resource.count()
cursor = connections['geodb'].cursor()
cursor.execute("select last_incidentdate, last_sync from ref_security")
row = cursor.fetchall()
response['last_incidentdate'] = row[0][0].strftime("%Y-%m-%d")
response['last_incidentsync'] = row[0][1].strftime("%Y-%m-%d")
date_N_days_ago = datetime.date.today() - row[0][0]
response['last_incidentdate_ago'] = str(date_N_days_ago).split(',')[0]
response['color_code'] = 'black'
if date_N_days_ago <= datetime.timedelta(days=2):
response['color_code'] = 'green'
elif date_N_days_ago > datetime.timedelta(days=2) and date_N_days_ago <= datetime.timedelta(days=4):
response['color_code'] = 'yellow'
elif date_N_days_ago > datetime.timedelta(days=4) and date_N_days_ago <= datetime.timedelta(days=5):
response['color_code'] = 'orange'
elif date_N_days_ago > datetime.timedelta(days=5):
response['color_code'] = 'red'
date_N_days_ago = datetime.date.today() - row[0][1]
response['last_incidentsync_ago'] = str(date_N_days_ago).split(',')[0]
cursor.close()
return response
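# The colour thresholds applied in getStats above can be read as a small step
# function of the incident age. Sketch only (hypothetical helper, not used above);
# it assumes `datetime` is imported at module level, as the code above already does.
def incident_age_color(days_ago):
    if days_ago <= datetime.timedelta(days=2):
        return 'green'
    if days_ago <= datetime.timedelta(days=4):
        return 'yellow'
    if days_ago <= datetime.timedelta(days=5):
        return 'orange'
    return 'red'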
# continuation of the clone from api.py
class getIncidentsRaw(ModelResource):
"""Incidents raw api"""
class Meta:
authorization = DjangoAuthorization()
resource_name = 'incident_raw'
allowed_methods = ['post']
detail_allowed_methods = ['post']
always_return_data = True
def post_list(self, request, **kwargs):
self.method_check(request, allowed=['post'])
response = self.getStats(request)
return self.create_response(request, response)
def getStats(self, request):
query_filter_group = []
temp_group = dict(request.POST)['query_type']
filterLock = dict(request.POST)['filterlock']
response = {}
response['objects'] = []
resource = AfgIncidentOasis.objects.all()
if filterLock[0]!='':
resource = resource.filter(wkb_geometry__intersects=filterLock[0])
if len(temp_group)==1:
resource = resource.filter(incident_date__gte=request.POST['start_date'],incident_date__lte=request.POST['end_date']).order_by('-incident_date')
elif len(temp_group)==2:
stat_type_filter = dict(request.POST)['incident_type']
stat_target_filter = dict(request.POST)['incident_target']
resource = resource.filter(incident_date__gte=request.POST['start_date'],incident_date__lte=request.POST['end_date']).order_by('-incident_date')
if stat_type_filter[0]=='':
resource = resource
# resource = AfgIncidentOasis.objects.filter(incident_date__gte=request.POST['start_date'],incident_date__lte=request.POST['end_date']).values(temp_group[0],temp_group[1]).annotate(count=Count('uid'), affected=Sum('affected'), injured=Sum('injured'), violent=Sum('violent'), dead=Sum('dead')).order_by(temp_group[0],temp_group[1])
else:
resource = resource.filter(main_type__in=stat_type_filter)
if stat_target_filter[0]=='':
resource = resource[:100]
else:
resource = resource.filter(main_target__in=stat_target_filter)[:100]
for i in resource:
response['objects'].append({
'date':i.incident_date,
'desc':i.description
})
response['total_count'] = resource.count()
cursor = connections['geodb'].cursor()
cursor.execute("select last_incidentdate, last_sync from ref_security")
row = cursor.fetchall()
response['last_incidentdate'] = row[0][0].strftime("%Y-%m-%d")
response['last_incidentsync'] = row[0][1].strftime("%Y-%m-%d")
cursor.close()
return response
class getVillages(ModelResource):
"""villages api"""
class Meta:
authorization = DjangoAuthorization()
resource_name = 'get_villages'
allowed_methods = ['get']
detail_allowed_methods = ['get']
always_return_data = True
def get_list(self, request, **kwargs):
self.method_check(request, allowed=['get'])
if request.GET['type']=='VUID':
response = self.getVillageFromVUID(request)
else:
response = self.getStats(request)
# return self.create_response(request, response)
return HttpResponse(response, mimetype='application/json')
def getVillageFromVUID(self, request):
resource = AfgPplp.objects.all().values('vil_uid','name_en','type_settlement','wkb_geometry')
resource = resource.filter(vuid__icontains=request.GET['search'])
response = GeoJSONSerializer().serialize(resource, use_natural_keys=True, with_modelname=False, geometry_field='wkb_geometry', srid=3857)
data = json.loads(response)
for i in range(len(data['features'])):
data['features'][i]['properties']['number']=i+1
if 'name_en' in data['features'][i]['properties']:
data['features'][i]['properties']['fromlayer'] = 'glyphicon glyphicon-home'
return json.dumps(data)
def fuzzyLookup(self, request):
f = AfgPplp.objects.all().values('name_en','dist_na_en','prov_na_en','vil_uid','type_settlement','wkb_geometry')
if request.GET['provname'] != '':
f = f.filter(prov_na_en=request.GET['provname'])
if request.GET['distname'] != '':
f = f.filter(dist_na_en=request.GET['distname'])
choices = []
for i in f:
prov_na_en = ''
dist_na_en = ''
if request.GET['provname'] != '':
prov_na_en = i['prov_na_en']
if request.GET['distname'] != '':
dist_na_en = i['dist_na_en']
# choices.append(i['name_en'].lstrip()+';'+dist_na_en+';'+prov_na_en)
choices.append(i['name_en'])
# print choices
# x = process.extract(request.GET['search']+";"+request.GET['distname']+";"+request.GET['provname'], choices, scorer=fuzz.token_sort_ratio, limit=10)
# x = process.extract(request.GET['search'], choices, scorer=fuzz.token_sort_ratio, limit=25)
x = process.extractWithoutOrder(request.GET['search'], choices, scorer=fuzz.token_sort_ratio, score_cutoff=50)
# print x[0][0], request.GET['provname']+";"+request.GET['distname']+";"+request.GET['search']
scoreKeeper = {}
settlements = []
for i in x:
# print i[0]
scoreKeeper[i[0]]=i[1]
settlements.append(i[0])
f = f.filter(name_en__in=settlements)
# f = f.extra(where=["name_en+';'+dist_na_en+';'+prov_na_en in "])
return {'result':f, 'scoreKeeper':scoreKeeper}
def getStats(self, request):
# print request.GET['fuzzy']
fuzzy = False
if request.GET['fuzzy']== 'true' and request.GET['search'] != '' and (request.GET['type']=='settlements' or request.GET['type']=='s_oasis'):
dt = self.fuzzyLookup(request)
resource = dt['result']
fuzzy = True
else:
# resource = .transform(900913, field_name='wkb_geometry') string__icontains
if request.GET['type']=='settlements':
resource = AfgPplp.objects.all().values('vil_uid','name_en','type_settlement','wkb_geometry')
elif request.GET['type']=='healthfacility':
resource = AfgHltfac.objects.all()
elif request.GET['type']=='airport':
resource = AfgAirdrmp.objects.all()
else :
resource = OasisSettlements.objects.all().values('vil_uid','name_en','type_settlement','wkb_geometry')
# print request.GET['dist_code']
if request.GET['dist_code'] != '':
resource = resource.filter(dist_code=request.GET['dist_code'])
if request.GET['prov_code'] != '':
if request.GET['type']=='settlements':
resource = resource.filter(prov_code_1=request.GET['prov_code'])
else:
resource = resource.filter(prov_code=request.GET['prov_code'])
if request.GET['search'] != '':
if request.GET['type']=='settlements':
resource = resource.filter(name_en__icontains=request.GET['search'])
elif request.GET['type']=='healthfacility':
resource = resource.filter(facility_name__icontains=request.GET['search'])
elif request.GET['type']=='airport':
resource = resource.filter(namelong__icontains=request.GET['search'])
else:
resource = resource.filter(name_en__icontains=request.GET['search'])
response = GeoJSONSerializer().serialize(resource, use_natural_keys=True, with_modelname=False, geometry_field='wkb_geometry', srid=3857)
data = json.loads(response)
for i in range(len(data['features'])):
if fuzzy:
tmp_set = data['features'][i]['properties']['name_en']
data['features'][i]['properties']['score']=dt['scoreKeeper'][tmp_set]
data['features'][i]['properties']['number']=i+1
if 'name_en' in data['features'][i]['properties']:
data['features'][i]['properties']['fromlayer'] = 'glyphicon glyphicon-home'
elif 'namelong' in data['features'][i]['properties']:
data['features'][i]['properties']['fromlayer'] = 'glyphicon glyphicon-plane'
elif 'facility_name' in data['features'][i]['properties']:
data['features'][i]['properties']['fromlayer'] = 'glyphicon glyphicon-header'
# return response
return json.dumps(data)
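# The fuzzy path above uses fuzzywuzzy's process.extractWithoutOrder with
# fuzz.token_sort_ratio and a score cut-off of 50, then re-filters the queryset by
# the matched settlement names and attaches each match's score to the GeoJSON
# feature properties before serialising.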
# get last update values
class getLastUpdatedStatus(ModelResource):
"""last updated status api"""
class Meta:
resource_name = 'lastUpdated'
allowed_methods = ['get']
detail_allowed_methods = ['get']
def getUpdatedValues(self, request):
response = {}
sw = forecastedLastUpdate.objects.filter(forecasttype='snowwater').latest('datadate')
rf = forecastedLastUpdate.objects.filter(forecasttype='riverflood').latest('datadate')
eq = earthquake_events.objects.exclude(shakemaptimestamp__isnull=True).latest('dateofevent')
# print eq.event_code
# print eq.dateofevent
# print eq.title
# print rf.datadate
tempRF = rf.datadate + datetime.timedelta(hours=4.5)
tempSW = sw.datadate + datetime.timedelta(hours=4.5)
tempEQ = eq.dateofevent + datetime.timedelta(hours=4.5)
tz = timezone('Asia/Kabul')
tempRF = tempRF.replace(tzinfo=tz)
tempSW = tempSW.replace(tzinfo=tz)
tempEQ = tempEQ.replace(tzinfo=tz)
stdSC = datetime.datetime.utcnow()
stdSC = stdSC.replace(hour=10, minute=00, second=00)
tempSC = datetime.datetime.utcnow()
# tempSC = tempSC.replace(hour=10, minute=00, second=00)
if stdSC > tempSC:
tempSC = tempSC - datetime.timedelta(days=1)
# tempSC = tempSC.replace(hour=10, minute=00, second=00)
# else:
# tempSC = tempSC.replace(hour=10, minute=00, second=00)
tempSC = tempSC.replace(hour=10, minute=00, second=00)
tempSC = tempSC + datetime.timedelta(hours=4.5)
tempSC = tempSC.replace(tzinfo=tz)
# print tempSC
# print stdSC
# response["riverflood_lastupdated"] = tempRF.strftime("%d-%m-%Y %H:%M")
# response["snowwater_lastupdated"] = tempSW.strftime("%d-%m-%Y %H:%M")
response['flood_forecast_last_updated']=tempRF
response['avalanche_forecast_last_updated']=tempSW
response['snow_cover_forecast_last_updated']=tempSC
response['shakemap_event_code'] = eq.event_code
response['shakemap_title'] = eq.title
response['shakemap_last_updated'] = tempEQ
return response
def get_list(self, request, **kwargs):
self.method_check(request, allowed=['get'])
response = self.getUpdatedValues(request)
return self.create_response(request, response)
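# Sketch (illustrative only) of the snow-cover reference time computed in
# getUpdatedValues above: take today's 10:00 UTC product time, roll back one day if
# that time has not been reached yet, then shift by +4:30 towards Kabul time.
def snow_cover_reference(now_utc):
    ref = now_utc.replace(hour=10, minute=0, second=0)
    if ref > now_utc:
        ref = ref - datetime.timedelta(days=1)
    return ref + datetime.timedelta(hours=4.5)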
class getLandslide(ModelResource):
class Meta:
resource_name = 'getlandslide'
allowed_methods = ['post']
detail_allowed_methods = ['post']
cache = SimpleCache()
def post_list(self, request, **kwargs):
self.method_check(request, allowed=['post'])
response = self.getData(request)
return self.create_response(request, response)
def getData(self, request):
boundaryFilter = json.loads(request.body)
temp1 = []
for i in boundaryFilter['spatialfilter']:
temp1.append('ST_GeomFromText(\''+i+'\',4326)')
temp2 = 'ARRAY['
first=True
for i in temp1:
if first:
temp2 = temp2 + i
first=False
else :
temp2 = temp2 + ', ' + i
temp2 = temp2+']'
filterLock = 'ST_Union('+temp2+')'
flag = boundaryFilter['flag']
code = boundaryFilter['code']
response = {}
if flag=='entireAfg':
sql = "select \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 7 then afg_pplp.vuid_population \
end)),0) as lsi_immap_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 5 and afg_lsp_affpplp.lsi_immap < 7 then afg_pplp.vuid_population \
end)),0) as lsi_immap_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 4 and afg_lsp_affpplp.lsi_immap < 5 then afg_pplp.vuid_population \
end)),0) as lsi_immap_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 2 and afg_lsp_affpplp.lsi_immap < 4 then afg_pplp.vuid_population \
end)),0) as lsi_immap_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 1 and afg_lsp_affpplp.lsi_immap < 2 then afg_pplp.vuid_population \
end)),0) as lsi_immap_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 7 then afg_pplp.vuid_population \
end)),0) as lsi_ku_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 5 and afg_lsp_affpplp.lsi_ku < 7 then afg_pplp.vuid_population \
end)),0) as lsi_ku_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 4 and afg_lsp_affpplp.lsi_ku < 5 then afg_pplp.vuid_population \
end)),0) as lsi_ku_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 2 and afg_lsp_affpplp.lsi_ku < 4 then afg_pplp.vuid_population \
end)),0) as lsi_ku_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 1 and afg_lsp_affpplp.lsi_ku < 2 then afg_pplp.vuid_population \
end)),0) as lsi_ku_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 5 and afg_lsp_affpplp.ls_s1_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 4 and afg_lsp_affpplp.ls_s1_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 2 and afg_lsp_affpplp.ls_s1_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 1 and afg_lsp_affpplp.ls_s1_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 5 and afg_lsp_affpplp.ls_s2_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 4 and afg_lsp_affpplp.ls_s2_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 2 and afg_lsp_affpplp.ls_s2_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 1 and afg_lsp_affpplp.ls_s2_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 5 and afg_lsp_affpplp.ls_s3_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 4 and afg_lsp_affpplp.ls_s3_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 2 and afg_lsp_affpplp.ls_s3_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 1 and afg_lsp_affpplp.ls_s3_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_very_low \
from afg_lsp_affpplp \
inner join afg_pplp on afg_lsp_affpplp.vuid=afg_pplp.vuid"
elif flag =='currentProvince':
if len(str(boundaryFilter['code'])) > 2:
ff0001 = "afg_pplp.dist_code = '"+str(boundaryFilter['code'])+"'"
else :
ff0001 = "afg_pplp.prov_code_1 = '"+str(boundaryFilter['code'])+"'"
sql = "select \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 7 then afg_pplp.vuid_population \
end)),0) as lsi_immap_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 5 and afg_lsp_affpplp.lsi_immap < 7 then afg_pplp.vuid_population \
end)),0) as lsi_immap_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 4 and afg_lsp_affpplp.lsi_immap < 5 then afg_pplp.vuid_population \
end)),0) as lsi_immap_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 2 and afg_lsp_affpplp.lsi_immap < 4 then afg_pplp.vuid_population \
end)),0) as lsi_immap_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 1 and afg_lsp_affpplp.lsi_immap < 2 then afg_pplp.vuid_population \
end)),0) as lsi_immap_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 7 then afg_pplp.vuid_population \
end)),0) as lsi_ku_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 5 and afg_lsp_affpplp.lsi_ku < 7 then afg_pplp.vuid_population \
end)),0) as lsi_ku_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 4 and afg_lsp_affpplp.lsi_ku < 5 then afg_pplp.vuid_population \
end)),0) as lsi_ku_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 2 and afg_lsp_affpplp.lsi_ku < 4 then afg_pplp.vuid_population \
end)),0) as lsi_ku_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 1 and afg_lsp_affpplp.lsi_ku < 2 then afg_pplp.vuid_population \
end)),0) as lsi_ku_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 5 and afg_lsp_affpplp.ls_s1_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 4 and afg_lsp_affpplp.ls_s1_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 2 and afg_lsp_affpplp.ls_s1_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 1 and afg_lsp_affpplp.ls_s1_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 5 and afg_lsp_affpplp.ls_s2_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 4 and afg_lsp_affpplp.ls_s2_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 2 and afg_lsp_affpplp.ls_s2_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 1 and afg_lsp_affpplp.ls_s2_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 5 and afg_lsp_affpplp.ls_s3_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 4 and afg_lsp_affpplp.ls_s3_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 2 and afg_lsp_affpplp.ls_s3_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 1 and afg_lsp_affpplp.ls_s3_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_very_low \
from afg_lsp_affpplp \
inner join afg_pplp on afg_lsp_affpplp.vuid=afg_pplp.vuid \
where " + ff0001
elif flag =='drawArea':
sql = "select \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 7 then afg_pplp.vuid_population \
end)),0) as lsi_immap_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 5 and afg_lsp_affpplp.lsi_immap < 7 then afg_pplp.vuid_population \
end)),0) as lsi_immap_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 4 and afg_lsp_affpplp.lsi_immap < 5 then afg_pplp.vuid_population \
end)),0) as lsi_immap_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 2 and afg_lsp_affpplp.lsi_immap < 4 then afg_pplp.vuid_population \
end)),0) as lsi_immap_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 1 and afg_lsp_affpplp.lsi_immap < 2 then afg_pplp.vuid_population \
end)),0) as lsi_immap_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 7 then afg_pplp.vuid_population \
end)),0) as lsi_ku_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 5 and afg_lsp_affpplp.lsi_ku < 7 then afg_pplp.vuid_population \
end)),0) as lsi_ku_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 4 and afg_lsp_affpplp.lsi_ku < 5 then afg_pplp.vuid_population \
end)),0) as lsi_ku_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 2 and afg_lsp_affpplp.lsi_ku < 4 then afg_pplp.vuid_population \
end)),0) as lsi_ku_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 1 and afg_lsp_affpplp.lsi_ku < 2 then afg_pplp.vuid_population \
end)),0) as lsi_ku_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 5 and afg_lsp_affpplp.ls_s1_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 4 and afg_lsp_affpplp.ls_s1_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 2 and afg_lsp_affpplp.ls_s1_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 1 and afg_lsp_affpplp.ls_s1_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 5 and afg_lsp_affpplp.ls_s2_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 4 and afg_lsp_affpplp.ls_s2_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 2 and afg_lsp_affpplp.ls_s2_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 1 and afg_lsp_affpplp.ls_s2_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 5 and afg_lsp_affpplp.ls_s3_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 4 and afg_lsp_affpplp.ls_s3_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 2 and afg_lsp_affpplp.ls_s3_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 1 and afg_lsp_affpplp.ls_s3_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_very_low \
from afg_lsp_affpplp \
inner join afg_pplp on afg_lsp_affpplp.vuid=afg_pplp.vuid \
where ST_Intersects(afg_pplp.wkb_geometry,"+filterLock+")"
else:
sql = "select \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 7 then afg_pplp.vuid_population \
end)),0) as lsi_immap_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 5 and afg_lsp_affpplp.lsi_immap < 7 then afg_pplp.vuid_population \
end)),0) as lsi_immap_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 4 and afg_lsp_affpplp.lsi_immap < 5 then afg_pplp.vuid_population \
end)),0) as lsi_immap_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 2 and afg_lsp_affpplp.lsi_immap < 4 then afg_pplp.vuid_population \
end)),0) as lsi_immap_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_immap >= 1 and afg_lsp_affpplp.lsi_immap < 2 then afg_pplp.vuid_population \
end)),0) as lsi_immap_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 7 then afg_pplp.vuid_population \
end)),0) as lsi_ku_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 5 and afg_lsp_affpplp.lsi_ku < 7 then afg_pplp.vuid_population \
end)),0) as lsi_ku_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 4 and afg_lsp_affpplp.lsi_ku < 5 then afg_pplp.vuid_population \
end)),0) as lsi_ku_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 2 and afg_lsp_affpplp.lsi_ku < 4 then afg_pplp.vuid_population \
end)),0) as lsi_ku_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.lsi_ku >= 1 and afg_lsp_affpplp.lsi_ku < 2 then afg_pplp.vuid_population \
end)),0) as lsi_ku_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 5 and afg_lsp_affpplp.ls_s1_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 4 and afg_lsp_affpplp.ls_s1_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 2 and afg_lsp_affpplp.ls_s1_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s1_wb >= 1 and afg_lsp_affpplp.ls_s1_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s1_wb_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 5 and afg_lsp_affpplp.ls_s2_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 4 and afg_lsp_affpplp.ls_s2_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 2 and afg_lsp_affpplp.ls_s2_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s2_wb >= 1 and afg_lsp_affpplp.ls_s2_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s2_wb_very_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 7 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_very_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 5 and afg_lsp_affpplp.ls_s3_wb < 7 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_high, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 4 and afg_lsp_affpplp.ls_s3_wb < 5 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_moderate, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 2 and afg_lsp_affpplp.ls_s3_wb < 4 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_low, \
coalesce(round(sum(case \
when afg_lsp_affpplp.ls_s3_wb >= 1 and afg_lsp_affpplp.ls_s3_wb < 2 then afg_pplp.vuid_population \
end)),0) as ls_s3_wb_very_low \
from afg_lsp_affpplp \
inner join afg_pplp on afg_lsp_affpplp.vuid=afg_pplp.vuid \
where ST_Intersects(afg_pplp.wkb_geometry,"+filterLock+")"
cursor = connections['geodb'].cursor()
row = query_to_dicts(cursor, sql)
counts = []
for i in row:
counts.append(i)
cursor.close()
return counts[0]
def getDroughtStatistics(filterLock, flag, code, woy, includes=[], excludes=[]):
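# Aggregates drought-affected population, buildings and area (sq. km) per
# simplified land-cover class and drought-severity level for the given
# week-of-year (woy), optionally restricted to a province/district or geometry.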
import pandas as pd
if flag=='entireAfg':
sql = "select \
afg_lndcrva.agg_simplified_description, round(history_drought.mean-1) as min, \
COALESCE(ROUND(sum(afg_lndcrva.area_population)),0) as pop, \
COALESCE(ROUND(sum(afg_lndcrva.area_buildings)),0) as building, \
COALESCE(ROUND(sum(afg_lndcrva.area_sqm)/1000000,1),0) as area \
from afg_lndcrva inner join history_drought on history_drought.ogc_fid=afg_lndcrva.ogc_fid \
where afg_lndcrva.aggcode_simplified not in ('WAT','BRS', 'BSD', 'SNW') and aggcode not in ('AGR/NHS','NHS/NFS','NHS/BRS','NHS/WAT','NHS/URB','URB/AGT','URB/AGI','URB/NHS','URB/BRS','URB/BSD') \
and history_drought.woy='"+woy+"'\
group by afg_lndcrva.agg_simplified_description, round(history_drought.mean-1) \
order by afg_lndcrva.agg_simplified_description, round(history_drought.mean-1)"
elif flag =='currentProvince':
if len(str(code)) > 2:
ff0001 = "afg_lndcrva.dist_code = '"+str(code)+"'"
else :
ff0001 = "afg_lndcrva.prov_code = '"+str(code)+"'"
sql = "select \
afg_lndcrva.agg_simplified_description, round(history_drought.mean-1) as min, \
COALESCE(ROUND(sum(afg_lndcrva.area_population)),0) as pop, \
COALESCE(ROUND(sum(afg_lndcrva.area_buildings)),0) as building, \
COALESCE(ROUND(sum(afg_lndcrva.area_sqm)/1000000,1),0) as area \
from afg_lndcrva inner join history_drought on history_drought.ogc_fid=afg_lndcrva.ogc_fid \
where afg_lndcrva.aggcode_simplified not in ('WAT','BRS', 'BSD', 'SNW') and aggcode not in ('AGR/NHS','NHS/NFS','NHS/BRS','NHS/WAT','NHS/URB','URB/AGT','URB/AGI','URB/NHS','URB/BRS','URB/BSD') \
and history_drought.woy='"+woy+"'\
and "+ff0001+" \
group by afg_lndcrva.agg_simplified_description, round(history_drought.mean-1) \
order by afg_lndcrva.agg_simplified_description, round(history_drought.mean-1)"
elif flag =='drawArea':
sql = "select \
afg_lndcrva.agg_simplified_description, round(history_drought.mean-1) as min, \
COALESCE(ROUND(sum(afg_lndcrva.area_population)),0) as pop, \
COALESCE(ROUND(sum(afg_lndcrva.area_buildings)),0) as building, \
COALESCE(ROUND(sum(afg_lndcrva.area_sqm)/1000000,1),0) as area \
from afg_lndcrva inner join history_drought on history_drought.ogc_fid=afg_lndcrva.ogc_fid \
where afg_lndcrva.aggcode_simplified not in ('WAT','BRS', 'BSD', 'SNW') and aggcode not in ('AGR/NHS','NHS/NFS','NHS/BRS','NHS/WAT','NHS/URB','URB/AGT','URB/AGI','URB/NHS','URB/BRS','URB/BSD') \
and history_drought.woy='"+woy+"' \
and ST_Intersects(afg_lndcrva.wkb_geometry,"+filterLock+") \
group by afg_lndcrva.agg_simplified_description, round(history_drought.mean-1) \
order by afg_lndcrva.agg_simplified_description, round(history_drought.mean-1)"
else:
sql = "select \
afg_lndcrva.agg_simplified_description, round(history_drought.mean-1) as min, \
COALESCE(ROUND(sum(afg_lndcrva.area_population)),0) as pop, \
COALESCE(ROUND(sum(afg_lndcrva.area_buildings)),0) as building, \
COALESCE(ROUND(sum(afg_lndcrva.area_sqm)/1000000,1),0) as area \
from afg_lndcrva inner join history_drought on history_drought.ogc_fid=afg_lndcrva.ogc_fid \
where afg_lndcrva.aggcode_simplified not in ('WAT','BRS', 'BSD', 'SNW') and aggcode not in ('AGR/NHS','NHS/NFS','NHS/BRS','NHS/WAT','NHS/URB','URB/AGT','URB/AGI','URB/NHS','URB/BRS','URB/BSD') \
and history_drought.woy='"+woy+"'\
and ST_Intersects(afg_lndcrva.wkb_geometry,"+filterLock+") \
group by afg_lndcrva.agg_simplified_description, round(history_drought.mean-1) \
order by afg_lndcrva.agg_simplified_description, round(history_drought.mean-1)"
cursor = connections['geodb'].cursor()
row = query_to_dicts(cursor, sql)
counts = []
for i in row:
if i['min']>=0:
counts.append(i)
cursor.close()
df = pd.DataFrame(counts, columns=counts[0].keys())
d = {}
for i in df['agg_simplified_description'].unique():
d[i] = [{int(df['min'][j]): {
'pop':df['pop'][j],
'building':df['building'][j],
'area':df['area'][j]
}
} for j in df[df['agg_simplified_description']==i].index]
selected_date_range = getYearRangeFromWeek(woy)
data = {'record':[], 'woy':woy, 'start':selected_date_range[0], 'end':selected_date_range[1]}
for i in d:
detail = []
for det in d[i]:
ket = None
if det.keys()[0] == 0:
ket = 'Abnormally Dry Condition'
elif det.keys()[0] == 1:
ket = 'Moderate'
elif det.keys()[0] == 2:
ket = 'Severe'
elif det.keys()[0] == 3:
ket = 'Extreme'
elif det.keys()[0] == 4:
ket = 'Exceptional'
val = {}
val.update({'name':ket})
val.update(det[det.keys()[0]])
detail.append(val)
data['record'].append(
{
'name':i,
'detail': detail,
'woy':woy
}
)
return data
def getClosestDroughtWOY(woy):
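# Returns the most recent week-of-year present in history_drought that falls
# strictly before the requested woy (None if no earlier week exists).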
sql = "select distinct woy from history_drought \
where to_timestamp(concat(substring(woy,6,2),' ',substring(woy,1,4)), 'W YYYY')::date < to_timestamp(concat(substring('"+woy+"',6,2),' ',substring('"+woy+"',1,4)), 'W YYYY')::date \
ORDER BY woy DESC \
LIMIT 1"
cursor = connections['geodb'].cursor()
row = query_to_dicts(cursor, sql)
data = None
for i in row:
data = i['woy']
cursor.close()
return data
class getDrought(ModelResource):
class Meta:
resource_name = 'getdrought'
allowed_methods = ['post']
detail_allowed_methods = ['post']
cache = SimpleCache()
def post_list(self, request, **kwargs):
self.method_check(request, allowed=['post'])
response = self.getData(request)
return self.create_response(request, response)
def getData(self, request):
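# Resolves the closest available drought week-of-year for the requested date,
# then returns the matching drought statistics for the selected area.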
boundaryFilter = json.loads(request.body)
temp1 = []
for i in boundaryFilter['spatialfilter']:
temp1.append('ST_GeomFromText(\''+i+'\',4326)')
temp2 = 'ARRAY['
first=True
for i in temp1:
if first:
temp2 = temp2 + i
first=False
else :
temp2 = temp2 + ', ' + i
temp2 = temp2+']'
filterLock = 'ST_Union('+temp2+')'
flag = boundaryFilter['flag']
code = boundaryFilter['code']
dateIn = boundaryFilter['date'].split('-')
closest_woy = getClosestDroughtWOY(dateIn[0] + '%03d' % datetime.date(int(dateIn[0]), int(dateIn[1]), int(dateIn[2])).isocalendar()[1])
response = {}
response = getDroughtStatistics(filterLock,flag,code, closest_woy)
return response
def getYearRangeFromWeek(woy):
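# Converts a 'YYYYWWW' week-of-year string into (start, end) date strings for
# that week, counting weeks from the Monday of the week containing 1 January.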
year = int(woy[:-3])
week = int(woy[4:])
d = datetime.date(year,1,1)
d = d - datetime.timedelta(d.weekday())
dlt = datetime.timedelta(days = (week-1)*7)
return str(d + dlt), str(d + dlt + datetime.timedelta(days=6))
class getClosestDroughtWOYLayerAPI(ModelResource):
class Meta:
resource_name = 'getdroughtlayer'
allowed_methods = ['post']
detail_allowed_methods = ['post']
cache = SimpleCache()
def post_list(self, request, **kwargs):
self.method_check(request, allowed=['post'])
response = self.getData(request)
return self.create_response(request, response)
def getData(self, request):
boundaryFilter = json.loads(request.body)
dateIn = boundaryFilter['date'].split('-')
closest_woy = getClosestDroughtWOY(dateIn[0] + '%03d' % datetime.date(int(dateIn[0]), int(dateIn[1]), int(dateIn[2])).isocalendar()[1])
response = {'woy':closest_woy}
return response
| gpl-3.0 |
james4424/nest-simulator | topology/pynest/tests/test_dumping.py | 10 | 3706 | # -*- coding: utf-8 -*-
#
# test_dumping.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for topology hl_api dumping functions.
NOTE: These tests only check whether the code runs; they do not check
whether the results produced are correct.
"""
import unittest
import nest
import nest.topology as topo
import sys
import os
import os.path
class PlottingTestCase(unittest.TestCase):
def nest_tmpdir(self):
"""Returns temp dir path from environment, current dir otherwise."""
if 'NEST_DATA_PATH' in os.environ:
return os.environ['NEST_DATA_PATH']
else:
return '.'
def test_DumpNodes(self):
"""Test dumping nodes."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.DumpLayerNodes(l, os.path.join(self.nest_tmpdir(),
'test_DumpNodes.out.lyr'))
self.assertTrue(True)
def test_DumpNodes2(self):
"""Test dumping nodes, two layers."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.DumpLayerNodes(l * 2, os.path.join(self.nest_tmpdir(),
'test_DumpNodes2.out.lyr'))
self.assertTrue(True)
def test_DumpConns(self):
"""Test dumping connections."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.ConnectLayers(l, l, cdict)
topo.DumpLayerConnections(l, 'static_synapse',
os.path.join(self.nest_tmpdir(),
'test_DumpConns.out.cnn'))
self.assertTrue(True)
def test_DumpConns2(self):
"""Test dumping connections, 2 layers."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.ConnectLayers(l, l, cdict)
topo.DumpLayerConnections(l * 2, 'static_synapse',
os.path.join(self.nest_tmpdir(),
'test_DumpConns2.out.cnn'))
self.assertTrue(True)
def suite():
suite = unittest.makeSuite(PlottingTestCase, 'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
try:
import matplotlib.pyplot as plt
plt.show()
except ImportError:
pass
| gpl-2.0 |
mhdella/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and therefore
so are the corresponding Mahalanobis distances. One would do better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is shown in the boxplot, as suggested by Wilson and Hilferty [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
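# A minimal illustrative sketch: the squared Mahalanobis distance from the
# docstring written out directly with NumPy. The fitted estimators below
# expose the same quantity via their .mahalanobis() method; the helper name
# here is only an illustration.
def _mahalanobis_squared(x_i, mu, sigma):
    """Squared Mahalanobis distance of a single observation x_i."""
    diff = x_i - mu
    return float(np.dot(diff, np.linalg.solve(sigma, diff)))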
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
simo-tuomisto/portfolio | Statistical Methods 2014 - Home exam/Code/exam_p05.py | 1 | 2491 | import numpy as np
import matplotlib.pyplot as mpl
from scipy.stats import chisquare
from scipy.optimize import fmin_powell as fmin
def chisq(observed, expected):
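# Chi-square goodness-of-fit restricted to bins with a non-zero expected count.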
return chisquare(observed[expected>0],expected[expected>0])
def getPseudo(background,n_samples):
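# Draws n_samples pseudo-events from the normalised background histogram by
# inverse-CDF sampling of uniform random numbers.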
back_cum = np.cumsum(background)/np.sum(background)
back_pseudo = np.zeros_like(background)
for n in np.arange(n_samples):
random_number = np.random.random()
i = np.sum(back_cum<random_number)
back_pseudo[i] += 1
return back_pseudo
measured = np.loadtxt('real_mass.dat').T[-1]
back1 = np.loadtxt('MC1_mass.dat').T[-1]
back2 = np.loadtxt('MC2_mass.dat').T[-1]
n = np.sum(measured)
back_pseudo1 = getPseudo(back1,n)
back_pseudo2 = getPseudo(back2,n)
chi1,p1 = chisq(measured, back1)
chi2,p2 = chisq(measured, back2)
chi_pseudo1,p_pseudo1 = chisq(measured, back_pseudo1)
chi_pseudo2,p_pseudo2 = chisq(measured, back_pseudo2)
print 'Background 1:'
print 'Chi:',chi1,'P-value:',p1
print 'Background 2:'
print 'Chi:',chi2,'P-value:',p2
print 'Background 1 pseudoexperiment:'
print 'Chi:',chi_pseudo1,'P-value:',p_pseudo1
print 'Background 2 pseudoexperiment:'
print 'Chi:',chi_pseudo2,'P-value:',p_pseudo2
a = fmin(lambda x: chisq(measured, x*back1 + (1.0-x)*back2)[0], 0.0, disp=False)
back_optimal = a*back1+(1-a)*back2
back_pseudo_optimal = getPseudo(back_optimal,n)
chi_optimal,p_optimal = chisq(measured, back_optimal)
chi_pseudo_optimal,p_pseudo_optimal = chisq(measured, back_pseudo_optimal)
print 'Optimal background:'
print 'a:',a
print 'Chi:',chi_optimal,'P-value:',p_optimal
print 'Optimal background pseudoexperiment:'
print 'Chi:',chi_pseudo_optimal,'P-value:',p_pseudo_optimal
mpl.figure(facecolor='white',figsize=(12,9))
mpl.plot(measured,'-b',label='Measured mass')
mpl.plot(back1,'-r',label='Background 1')
mpl.plot(back2,'-g',label='Background 2')
mpl.legend(loc=4)
mpl.savefig('p05_back.pdf')
mpl.figure(facecolor='white',figsize=(12,9))
mpl.plot(measured,'-b',label='Measured mass')
mpl.plot(back_pseudo1,'-r',label='Pseudoexperiment from background 1')
mpl.plot(back_pseudo2,'-g',label='Pseudoexperiment from background 2')
mpl.legend(loc=4)
mpl.savefig('p05_pseudo.pdf')
mpl.figure(facecolor='white',figsize=(12,9))
mpl.plot(measured,'-b',label='Measured mass')
mpl.plot(back_optimal,'-r',label='Optimal background')
mpl.plot(back_pseudo_optimal,'-g',label='Pseudoexperiment from optimal background')
mpl.legend(loc=4)
mpl.savefig('p05_optimal.pdf')
#mpl.show() | mit |
sonapraneeth-a/object-classification | library/tf/models/LinearClassifier.py | 1 | 34610 | import tensorflow as tf
from sklearn.metrics import classification_report, accuracy_score
from sklearn.model_selection import train_test_split
from library.tf import Layers
from library.plot_tools import plot_tools
from library.preprocessing import data_transform
import numpy as np
from library.utils import file_utils
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import math
import os, glob, time, re
from os.path import basename
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
from library.preprocessing import ZCA
# Resources
# https://www.youtube.com/watch?v=3VEXX73tnw4
class TFLinearClassifier:
def __init__(self, logs=True, log_dir='./logs/', learning_rate=0.01, activation_fn='softmax', restore=True,
num_iterations=100, device='', session_type='default', descent_method='gradient',
init_weights='random', display_step=10, reg_const=0.01, regularize=False, init_bias='ones',
learning_rate_type='constant', model_name='./model/linear_classifier_model.ckpt',
save_model=False, transform=True, test_log=True, transform_method='StandardScaler',
tolerance=1e-7, train_validate_split=None, separate_writer=False, verbose=False):
self.verbose = verbose
self.restore = restore
# Tensorflow logs and models
self.tensorboard_log_dir = log_dir
self.tensorboard_logs = logs
self.merged_summary_op = None
# Log writers
self.separate_writer = separate_writer
self.summary_writer = None
self.train_writer = None
self.validate_writer = None
self.test_writer = None
# Model info
self.model = None
self.model_name = model_name
self.save_model = save_model
# Summary methods
self.train_loss_summary = None
self.train_acc_summary = None
self.validate_loss_summary = None
self.validate_acc_summary = None
self.learning_rate_summary = None
self.test_acc_summary = None
self.w_hist = None
self.w_im = None
self.b_hist = None
# Tensorflow variables for later use
self.var_train_loss = None
self.update_train_loss = None
self.list_train_loss = []
self.var_train_acc = None
self.update_train_acc = None
self.list_train_acc = []
self.var_validate_loss = None
self.update_validate_loss = None
self.list_validate_loss = []
self.var_validate_acc = None
self.update_validate_acc = None
self.list_validate_acc = []
self.test_var_acc = None
self.update_test_acc = None
self.list_test_acc = []
self.var_learning_rate = None
self.update_learning_rate = None
self.list_learning_rate = []
#
self.session = None
if device == '' or device is None:
self.device = '/cpu:0'
else:
self.device = device
self.session_type = session_type
# Parameters
self.learning_rate_type = learning_rate_type
self.current_learning_rate = learning_rate
self.learning_rate = learning_rate
self.max_iterations = num_iterations
self.display_step = display_step
self.tolerance = tolerance
self.descent_method = descent_method
self.init_weights = init_weights
self.init_bias = init_bias
self.reg_const = reg_const
self.regularize = regularize
self.activation_fn = activation_fn
# Data transform methods
self.transform = transform
self.transform_method = transform_method
# Graph inputs
self.x = None
self.y_true = None
self.y_true_cls = None
self.num_features = None
self.num_classes = None
# Validation and testing
self.y_pred = None
self.y_pred_cls = None
#
self.init_var = None
self.last_epoch = 0
self.global_step = 0
self.optimizer = None
self.train_loss = None
self.validate_loss = None
self.train_accuracy = None
self.validate_accuracy = None
self.test_accuracy = None
self.test_log = test_log
self.weights = None
self.biases = None
self.logits = None
self.correct_prediction = None
self.cross_entropy = None
#
self.train_validate_split = train_validate_split
def print_parameters(self):
print('>> Parameters for Linear Classifier')
print('Activation function : ', self.activation_fn)
print('Gradient Descent Method : ', self.descent_method)
print('Learning rate type : ', self.learning_rate_type)
print('Learning rate : ', self.learning_rate)
if self.regularize is True:
print('Regularization constant : ', self.reg_const)
print('Error Tolerance : ', self.tolerance)
print('Data Transformation method : ', self.transform_method)
print('Weights initializer : ', self.init_weights)
print('Bias initializer : ', self.init_bias)
print('>> Inputs for Tensorflow Graph')
print('X : ', self.x)
print('Y_true : ', self.y_true)
print('Y_true_cls : ', self.y_true_cls)
print('Device to use : ', self.device)
print('>> Output parameters for Tensorflow Graph')
print('Restore model : ', self.restore)
print('W : ', self.weights)
print('b : ', self.biases)
print('logits : ', self.logits)
print('Y_pred : ', self.y_pred)
print('Y_pred_cls : ', self.y_pred_cls)
print('cross_entropy : ', self.cross_entropy)
print('train_loss : ', self.train_loss)
print('optimizer : ', self.optimizer)
print('correct_prediction : ', self.correct_prediction)
print('>> Accuracy parameters')
print('Train Accuracy : ', self.train_accuracy)
print('Validate Accuracy : ', self.validate_accuracy)
print('Test Accuracy : ', self.test_accuracy)
def make_placeholders_for_inputs(self, num_features, num_classes):
with tf.device(self.device):
with tf.name_scope('Inputs'):
with tf.name_scope('Data'):
self.x = tf.placeholder(tf.float32, [None, num_features], name='X')
with tf.name_scope('Train_Labels'):
self.y_true = tf.placeholder(tf.float32, [None, num_classes], name='y_label')
self.y_true_cls = tf.placeholder(tf.int64, [None], name='y_class')
with tf.name_scope('Input_Image'):
image_shaped_input = tf.reshape(self.x, [-1, 32, 32, 3])
tf.summary.image('Training_Images', image_shaped_input, 1)
def make_parameters(self, num_features, num_classes):
with tf.device(self.device):
with tf.name_scope('Parameters'):
with tf.name_scope('Weights'):
if self.init_weights == 'zeros':
self.weights = tf.Variable(tf.zeros([num_features, num_classes]), name='W_zeros')
elif self.init_weights == 'ones':
self.weights = tf.Variable(tf.ones([num_features, num_classes]), name='W_ones')
elif self.init_weights == 'random_normal':
self.weights = tf.Variable(tf.random_normal([num_features, num_classes]), name='W_random_normal')
else:
self.weights = tf.Variable(tf.random_normal([num_features, num_classes]), name='W_random_normal')
with tf.name_scope('Bias'):
if self.init_bias == 'zeros':
self.biases = tf.Variable(tf.zeros([num_classes]), name='b_zeros')
elif self.init_bias == 'ones':
self.biases = tf.Variable(tf.ones([num_classes]), name='b_ones')
elif self.init_bias == 'random_normal':
self.biases = tf.Variable(tf.random_normal([num_classes]), name='b_random_normal')
else:
self.biases = tf.Variable(tf.random_normal([num_classes]), name='b_random_normal')
self.w_hist = tf.summary.histogram('Weights_Histogram', self.weights)
self.w_im = tf.summary.image('Weights_Image', self.weights)
self.b_hist = tf.summary.histogram('Bias', self.biases)
def make_predictions(self):
with tf.device(self.device):
with tf.name_scope('Predictions'):
self.logits = tf.matmul(self.x, self.weights) + self.biases
if self.activation_fn == 'softmax':
self.y_pred = tf.nn.softmax(self.logits)
elif self.activation_fn == 'relu':
self.y_pred = tf.nn.relu(self.logits)
elif self.activation_fn == 'sigmoid':
self.y_pred = tf.nn.sigmoid(self.logits)
self.y_pred_cls = tf.argmax(self.y_pred, dimension=1)
def make_optimization(self):
with tf.device(self.device):
with tf.name_scope('Cross_Entropy'):
self.cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_true)
with tf.name_scope('Loss_Function'):
if self.regularize is True:
ridge_param = tf.cast(tf.constant(self.reg_const), dtype=tf.float32)
ridge_loss = tf.reduce_mean(tf.square(self.weights))
self.train_loss = tf.add(tf.reduce_mean(self.cross_entropy), tf.multiply(ridge_param, ridge_loss))
if self.train_validate_split is not None:
self.validate_loss = tf.add(tf.reduce_mean(self.cross_entropy), tf.multiply(ridge_param, ridge_loss))
else:
self.train_loss = tf.reduce_mean(self.cross_entropy)
if self.train_validate_split is not None:
self.validate_loss = tf.reduce_mean(self.cross_entropy)
self.train_loss_summary = tf.summary.scalar('Training_Error', self.train_loss)
if self.train_validate_split is not None:
self.validate_loss_summary = tf.summary.scalar('Validation_Error', self.validate_loss)
self.var_train_loss = tf.Variable([], dtype=tf.float32, trainable=False,
validate_shape=False, name='train_loss_list')
self.update_train_loss = tf.assign(self.var_train_loss, self.list_train_loss, validate_shape=False)
if self.train_validate_split is not None:
self.var_validate_loss = tf.Variable([], dtype=tf.float32, trainable=False,
validate_shape=False, name='validate_loss_list')
self.update_validate_loss = tf.assign(self.var_validate_loss, self.list_validate_loss, validate_shape=False)
with tf.name_scope('Optimizer'):
self.var_learning_rate = tf.Variable([], dtype=tf.float32, trainable=False,
validate_shape=False, name='learning_rate_progress')
self.update_learning_rate = tf.assign(self.var_learning_rate, self.list_learning_rate,
validate_shape=False)
if self.learning_rate_type == 'exponential':
self.current_learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step,
self.display_step, 0.96, staircase=True)
else:
self.current_learning_rate = self.learning_rate
self.learning_rate_summary = tf.summary.scalar('Learning_rate', self.current_learning_rate)
if self.descent_method == 'gradient':
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.current_learning_rate)\
.minimize(self.train_loss)
elif self.descent_method == 'adam':
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.current_learning_rate)\
.minimize(self.train_loss)
else:
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.current_learning_rate)\
.minimize(self.train_loss)
self.correct_prediction = tf.equal(self.y_pred_cls, self.y_true_cls)
def make_accuracy(self):
with tf.device(self.device):
with tf.name_scope('Accuracy'):
self.train_accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.var_train_acc = tf.Variable([], dtype=tf.float32, trainable=False,
validate_shape=False, name='train_accuracy_list')
self.update_train_acc = tf.assign(self.var_train_acc, self.list_train_acc,
validate_shape=False)
self.train_acc_summary = tf.summary.scalar('Train_Accuracy', self.train_accuracy)
if self.train_validate_split is not None:
self.validate_accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.var_validate_acc = tf.Variable([], dtype=tf.float32, trainable=False,
validate_shape=False, name='validate_accuracy_list')
self.update_validate_acc = tf.assign(self.var_validate_acc, self.list_validate_acc,
validate_shape=False)
if self.separate_writer is True:
self.validate_acc_summary = tf.summary.scalar('Validate_Accuracy', self.validate_accuracy)
else:
self.validate_acc_summary = tf.summary.scalar('Validation_Accuracy', self.validate_accuracy)
if self.test_log is True:
self.test_accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.test_var_acc = tf.Variable([], dtype=tf.float32, trainable=False,
validate_shape=False, name='test_accuracy_list')
self.update_test_acc = tf.assign(self.test_var_acc, self.list_test_acc,
validate_shape=False)
self.test_acc_summary = tf.summary.scalar('Test_Accuracy', self.test_accuracy)
def create_graph(self, num_features, num_classes):
start = time.time()
self.num_features = num_features
self.num_classes = num_classes
self.global_step = tf.Variable(0, name='last_successful_epoch', trainable=False, dtype=tf.int32)
self.last_epoch = tf.assign(self.global_step, self.global_step + 1, name='assign_updated_epoch')
# Step 1: Creating placeholders for inputs
self.make_placeholders_for_inputs(num_features, num_classes)
# Step 2: Creating initial parameters for the variables
self.make_parameters(num_features, num_classes)
# Step 3: Make predictions for the data
self.make_predictions()
# Step 4: Perform optimization operation
self.make_optimization()
# Step 5: Calculate accuracies
self.make_accuracy()
# Step 6: Initialize all the required variables
with tf.device(self.device):
self.init_var = tf.global_variables_initializer()
end = time.time()
print('Tensorflow graph created in %.4f seconds' %(end-start))
return True
def fit(self, data, labels, classes, test_data=None, test_labels=None, test_classes=None):
pattern_cpu = re.compile('/cpu:[0-9]')
pattern_gpu = re.compile('/gpu:[0-9]')
if pattern_cpu.match(self.device):
print('Using CPU: ', self.device)
config = tf.ConfigProto(
log_device_placement=True,
allow_soft_placement=True,
#allow_growth=True,
#device_count={'CPU': 0}
)
if pattern_gpu.match(self.device):
print('Using GPU: ', self.device)
config = tf.ConfigProto(
log_device_placement=True,
allow_soft_placement=True,
#allow_growth=True,
#device_count={'GPU': 0}
)
if self.session_type == 'default':
self.session = tf.Session(config=config)
if self.session_type == 'interactive':
self.session = tf.InteractiveSession(config=config)
print('Session: ' + str(self.session))
self.session.run(self.init_var)
if self.tensorboard_logs is True:
file_utils.mkdir_p(self.tensorboard_log_dir)
self.merged_summary_op = tf.summary.merge_all()
if self.restore is False:
file_utils.delete_all_files_in_dir(self.tensorboard_log_dir)
if self.separate_writer is False:
self.summary_writer = tf.summary.FileWriter(self.tensorboard_log_dir, graph=self.session.graph)
else:
self.train_writer = tf.summary.FileWriter(self.tensorboard_log_dir + 'train',
graph=self.session.graph)
if self.train_validate_split is not None:
self.validate_writer = tf.summary.FileWriter(self.tensorboard_log_dir + 'validate',
graph=self.session.graph)
if self.test_log is True:
self.test_writer = tf.summary.FileWriter(self.tensorboard_log_dir + 'test',
graph=self.session.graph)
if self.save_model is True:
self.model = tf.train.Saver(max_to_keep=2)
if self.train_validate_split is not None:
train_data, validate_data, train_labels, validate_labels, train_classes, validate_classes = \
train_test_split(data, labels, classes, train_size=self.train_validate_split)
if self.verbose is True:
print('Data shape : ' + str(data.shape))
print('Labels shape : ' + str(labels.shape))
print('Classes shape : ' + str(classes.shape))
print('Train Data shape : ' + str(train_data.shape))
print('Train Labels shape : ' + str(train_labels.shape))
print('Train Classes shape : ' + str(train_classes.shape))
print('Validate Data shape : ' + str(validate_data.shape))
print('Validate Labels shape : ' + str(validate_labels.shape))
print('Validate Classes shape : ' + str(validate_classes.shape))
if self.test_log is False:
self.optimize(train_data, train_labels, train_classes,
validate_data=validate_data, validate_labels=validate_labels,
validate_classes=validate_classes)
else:
self.optimize(train_data, train_labels, train_classes,
validate_data=validate_data, validate_labels=validate_labels,
validate_classes=validate_classes, test_data=test_data,
test_labels=test_labels, test_classes=test_classes)
else:
if self.test_log is False:
self.optimize(data, labels, classes)
else:
self.optimize(data, labels, classes, test_data=test_data,
test_labels=test_labels, test_classes=test_classes)
def optimize(self, train_data, train_labels, train_classes,
validate_data=None, validate_labels=None, validate_classes=None,
test_data = None, test_labels = None, test_classes = None):
if self.transform is True:
if self.transform_method == 'StandardScaler':
ss = StandardScaler()
train_data = ss.fit_transform(train_data)
if self.train_validate_split is not None:
validate_data = ss.fit_transform(validate_data)
if self.test_log is True:
test_data = ss.fit_transform(test_data)
if self.transform_method == 'MinMaxScaler':
ss = MinMaxScaler()
train_data = ss.fit_transform(train_data)
if self.train_validate_split is not None:
validate_data = ss.fit_transform(validate_data)
if self.test_log is True:
test_data = ss.fit_transform(test_data)
file_name = os.path.splitext(os.path.abspath(self.model_name))[0]
num_files = len(sorted(glob.glob(os.path.abspath(file_name + '*.meta'))))
if num_files > 0:
checkpoint_file = os.path.abspath(sorted(glob.glob(file_name + '*.data-00000-of-00001'), reverse=True)[0])
if os.path.exists(checkpoint_file):
print('Restoring model from %s' % checkpoint_file)
meta_file = os.path.abspath(sorted(glob.glob(file_name + '*.meta'), reverse=True)[0])
print('Loading: %s' %meta_file)
saver = tf.train.import_meta_graph(meta_file)
print('Loading: %s' %os.path.abspath(checkpoint_file))
cpk = tf.train.latest_checkpoint(os.path.dirname(meta_file))
print('Checkpoint: ' + str(cpk))
print('Tensors')
print(print_tensors_in_checkpoint_file(file_name=cpk, all_tensors='', tensor_name=''))
saver.restore(self.session, tf.train.latest_checkpoint(os.path.dirname(meta_file)))
print('Last epoch to restore: ' + str(self.session.run(self.global_step)))
if self.train_validate_split is not None:
if self.test_log is False:
self.run(train_data, train_labels, train_classes,
validate_data=validate_data, validate_labels=validate_labels,
validate_classes=validate_classes)
else:
self.run(train_data, train_labels, train_classes,
validate_data=validate_data, validate_labels=validate_labels,
validate_classes=validate_classes, test_data=test_data,
test_labels=test_labels, test_classes=test_classes)
else:
if self.test_log is False:
self.run(train_data, train_labels, train_classes)
else:
self.run(train_data, train_labels, train_classes,
test_data=test_data, test_labels=test_labels, test_classes=test_classes)
def run(self, train_data, train_labels, train_classes,
validate_data=None, validate_labels=None, validate_classes=None,
test_data=None, test_labels=None, test_classes=None):
feed_dict_train = {self.x: train_data,
self.y_true: train_labels,
self.y_true_cls: train_classes}
if self.train_validate_split is not None:
feed_dict_validate = {self.x: validate_data,
self.y_true: validate_labels,
self.y_true_cls: validate_classes}
if self.test_log is True:
feed_dict_test = {self.x: test_data,
self.y_true: test_labels,
self.y_true_cls: test_classes}
epoch = self.session.run(self.global_step)
self.list_train_loss = self.session.run(self.var_train_loss)
self.list_train_loss = self.list_train_loss.tolist()
self.list_train_acc = self.session.run(self.var_train_acc)
self.list_train_acc = self.list_train_acc.tolist()
print('Length of train loss : %d' %len(self.list_train_loss))
print('Length of train accuracy : %d' % len(self.list_train_acc))
if self.train_validate_split is not None:
self.list_validate_loss = self.session.run(self.var_validate_loss)
self.list_validate_loss = self.list_validate_loss.tolist()
self.list_validate_acc = self.session.run(self.var_validate_acc)
self.list_validate_acc = self.list_validate_acc.tolist()
print('Length of validate loss : %d' % len(self.list_validate_loss))
print('Length of validate accuracy : %d' % len(self.list_validate_acc))
if self.test_log is True:
self.list_test_acc = self.session.run(self.test_var_acc)
self.list_test_acc = self.list_test_acc.tolist()
print('Length of test accuracy : %d' % len(self.list_test_acc))
print('Restoring training from epoch :', epoch)
converged = False
prev_cost = 0
while (epoch != self.max_iterations) and converged is False:
start = time.time()
_, train_loss, train_acc, curr_epoch \
= self.session.run([self.optimizer, self.train_loss, self.train_accuracy,
self.last_epoch], feed_dict=feed_dict_train)
train_loss_summary = self.session.run(self.train_loss_summary, feed_dict=feed_dict_train)
validate_loss_summary = self.session.run(self.validate_loss_summary, feed_dict=feed_dict_validate)
train_acc_summary = self.session.run(self.train_acc_summary, feed_dict=feed_dict_train)
learning_rate_summary = self.session.run(self.learning_rate_summary)
self.list_train_loss.append(train_loss)
self.update_train_loss = tf.assign(self.var_train_loss, self.list_train_loss, validate_shape=False)
self.update_train_loss.eval()
self.list_train_acc.append(train_acc)
self.update_train_acc = tf.assign(self.var_train_acc, self.list_train_acc, validate_shape=False)
self.update_train_acc.eval()
self.list_learning_rate.append(self.current_learning_rate)
self.update_learning_rate = tf.assign(self.var_learning_rate, self.list_learning_rate, validate_shape=False)
self.update_learning_rate.eval()
# w_hist = self.session.run(self.w_hist, feed_dict=feed_dict_train)
# self.summary_writer.add_summary(w_hist, epoch)
# w_im = self.session.run(self.w_im, feed_dict=feed_dict_train)
# self.summary_writer.add_summary(w_im, epoch)
if self.train_validate_split is not None:
validate_loss, validate_acc, validate_acc_summary = \
self.session.run([self.validate_loss, self.validate_accuracy, self.validate_acc_summary],
feed_dict=feed_dict_validate)
self.list_validate_loss.append(validate_loss)
self.update_validate_loss = tf.assign(self.var_validate_loss, self.list_validate_loss, validate_shape=False)
self.update_validate_loss.eval()
self.list_validate_acc.append(validate_acc)
self.update_validate_acc = tf.assign(self.var_validate_acc, self.list_validate_acc, validate_shape=False)
self.update_validate_acc.eval()
if self.test_log is True:
test_acc, test_acc_summary = \
self.session.run([self.test_accuracy, self.test_acc_summary], feed_dict=feed_dict_test)
self.list_test_acc.append(test_acc)
self.update_test_acc = tf.assign(self.test_var_acc, self.list_test_acc, validate_shape=False)
self.update_test_acc.eval()
if self.separate_writer is False:
self.summary_writer.add_summary(train_loss_summary, epoch)
self.summary_writer.add_summary(train_acc_summary, epoch)
self.summary_writer.add_summary(validate_loss_summary, epoch)
self.summary_writer.add_summary(validate_acc_summary, epoch)
self.summary_writer.add_summary(test_acc_summary, epoch)
self.summary_writer.add_summary(learning_rate_summary, epoch)
else:
self.train_writer.add_summary(train_loss_summary, epoch)
self.train_writer.add_summary(train_acc_summary, epoch)
if self.train_validate_split is not None:
self.validate_writer.add_summary(validate_loss_summary, epoch)
self.validate_writer.add_summary(validate_acc_summary, epoch)
if self.test_log is True:
self.test_writer.add_summary(test_acc_summary, epoch)
if epoch % self.display_step == 0:
duration = time.time() - start
if self.train_validate_split is not None and self.test_log is False:
print('>>> Epoch [%*d/%*d]'
%(int(len(str(self.max_iterations))), epoch,
int(len(str(self.max_iterations))), self.max_iterations))
print('train_loss: %.4f | train_acc: %.4f | val_loss: %.4f | val_acc: %.4f | '
'Time: %.4f s' %(train_loss, train_acc, validate_loss, validate_acc, duration))
elif self.train_validate_split is not None and self.test_log is True:
print('>>> Epoch [%*d/%*d]'
% (int(len(str(self.max_iterations))), epoch,
int(len(str(self.max_iterations))), self.max_iterations))
print('train_loss: %.4f | train_acc: %.4f | val_loss: %.4f | val_acc: %.4f | '
'test_acc: %.4f | Time: %.4f s'
%(train_loss, train_acc, validate_loss, validate_acc, test_acc, duration))
elif self.train_validate_split is None and self.test_log is True:
print('>>> Epoch [%*d/%*d]'
% (int(len(str(self.max_iterations))), epoch,
int(len(str(self.max_iterations))), self.max_iterations))
print('train_loss: %.4f | train_acc: %.4f | test_acc: %.4f | Time: %.4f s'
%(train_loss, train_acc, test_acc, duration))
else:
print('>>> Epoch [%*d/%*d]'
% (int(len(str(self.max_iterations))), epoch,
int(len(str(self.max_iterations))), self.max_iterations))
print('train_loss: %.4f | train_acc: %.4f | Time: %.4f s'
% (train_loss, train_acc, duration))
if self.save_model is True:
model_directory = os.path.dirname(self.model_name)
file_utils.mkdir_p(model_directory)
self.model.save(self.session, self.model_name, global_step=epoch)
if epoch == 0:
prev_cost = train_loss
else:
if math.fabs(train_loss - prev_cost) < self.tolerance:
converged = True
prev_cost = train_loss
epoch += 1
def predict(self, data):
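# Returns predicted class indices for raw input data (the same transform used
# during training is applied first when self.transform is True).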
if self.transform is True:
data = data_transform.transform(data, transform_method=self.transform_method)
feed_dict_data = {self.x: data}
predictions = self.session.run(self.y_pred_cls, feed_dict=feed_dict_data)
predictions = np.array(predictions)
return predictions
def load_model(self, model_name):
self.model.restore(self.session, model_name)
def close(self):
self.session.close()
def print_accuracy(self, test_data, test_labels, test_classes):
predict_classes = self.predict(test_data)
return accuracy_score(test_classes, predict_classes, normalize=True)
def print_classification_results(self, test_data, test_labels, test_classes, test_class_names=[],
normalize=True):
if self.transform is True:
test_data = data_transform.transform(test_data, transform_method=self.transform_method)
feed_dict_test = {self.x: test_data,
self.y_true: test_labels,
self.y_true_cls: test_classes}
cls_true = test_classes
cls_pred = self.session.run(self.y_pred_cls, feed_dict=feed_dict_test)
plot_tools.plot_confusion_matrix(cls_true, cls_pred, classes=test_class_names,
normalize=normalize, title='Confusion matrix')
print('Detailed classification report')
print(classification_report(y_true=cls_true, y_pred=cls_pred, target_names=test_class_names))
def __exit__(self, exc_type, exc_val, exc_tb):
self.session.close()
if self.separate_writer is False:
self.summary_writer.close()
else:
self.train_writer.close()
if self.train_validate_split is not None:
self.validate_writer.close()
if self.test_log is True:
self.test_writer.close()
def __del__(self):
self.session.close()
if self.separate_writer is False:
self.summary_writer.close()
else:
self.train_writer.close()
if self.train_validate_split is not None:
self.validate_writer.close()
if self.test_log is True:
self.test_writer.close()
| mit |
lbishal/scikit-learn | sklearn/model_selection/tests/test_search.py | 20 | 30855 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# MockClassifier below deliberately does not inherit from BaseEstimator,
# so hyperparameter search is exercised on a user-defined classifier;
# LinearSVCNoScore merely removes the score method from LinearSVC.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
# Check if ValueError (when labels is None) propagates to GridSearchCV
# And also check if labels is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
labels = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, labels)
non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_label_cvs:
print(cv)
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
PHLF/rasa_nlu | rasa_nlu/evaluate.py | 2 | 3814 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import itertools
import logging
import os
import matplotlib.pyplot as plt
from rasa_nlu.config import RasaNLUConfig
from rasa_nlu.converters import load_data
from rasa_nlu.model import Interpreter
from rasa_nlu.model import Metadata
logger = logging.getLogger(__name__)
def create_argparser():
parser = argparse.ArgumentParser(description='evaluate a trained Rasa NLU pipeline')
parser.add_argument('-d', '--data', default=None, help="file containing evaluation data")
parser.add_argument('-m', '--model', required=True, help="path to model")
parser.add_argument('-c', '--config', required=True, help="config file")
return parser
def plot_intent_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=None):
"""This function prints and plots the confusion matrix for the intent classification.
Normalization can be applied by setting `normalize=True`."""
import numpy as np
plt.imshow(cm, interpolation='nearest', cmap=cmap if cmap else plt.cm.Blues)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
logger.info("Normalized confusion matrix: \n{}".format(cm))
else:
logger.info("Confusion matrix, without normalization: \n{}".format(cm))
thresh = cm.max() / 2.
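# choose the annotation colour per cell: white text on dark cells, black text on light ones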
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def run_intent_evaluation(config, model_path, component_builder=None):
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.utils.multiclass import unique_labels
# load the evaluation data and the trained model's metadata
test_data = load_data(config['data'])
metadata = Metadata.load(model_path)
interpreter = Interpreter.load(metadata, config, component_builder)
test_y = [e.get("intent") for e in test_data.training_examples]
preds = []
for e in test_data.training_examples:
res = interpreter.parse(e.text)
if res.get('intent'):
preds.append(res['intent'].get('name'))
else:
preds.append(None)
logger.info("Intent Evaluation Results")
logger.info("F1-Score: {}".format(f1_score(test_y, preds, average='weighted')))
logger.info("Precision: {}".format(precision_score(test_y, preds, average='weighted')))
logger.info("Accuracy: {}".format(accuracy_score(test_y, preds)))
logger.info("Classification report: \n{}".format(classification_report(test_y, preds)))
cnf_matrix = confusion_matrix(test_y, preds)
plot_intent_confusion_matrix(cnf_matrix, classes=unique_labels(test_y, preds),
title='Intent Confusion matrix')
plt.show()
return
if __name__ == '__main__':
parser = create_argparser()
args = parser.parse_args()
nlu_config = RasaNLUConfig(args.config, os.environ, vars(args))
logging.basicConfig(level=nlu_config['log_level'])
run_intent_evaluation(nlu_config, args.model)
logger.info("Finished evaluation")
| apache-2.0 |
smartscheduling/scikit-learn-categorical-tree | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
TREE-Edu/speaker-rec-skill-test | gmm/python/test.py | 2 | 1662 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# $File: test.py
# $Date: Mon Dec 16 04:27:02 2013 +0800
# $Author: Xinyu Zhou <zxytim[at]gmail[dot]com>
import pygmm
from sklearn.mixture import GMM as SKGMM
from numpy import *
import numpy as np
import sys
def read_data(fname):
with open(fname) as fin:
return map(lambda line: map(float, line.rstrip().split()), fin)
def get_gmm(where):
nr_mixture = 256
nr_iteration = 1
if where == 'pygmm':
return pygmm.GMM(nr_mixture = nr_mixture,
min_covar = 1e-3,
nr_iteration = nr_iteration,
init_with_kmeans = 0,
concurrency = 1)
elif where == 'sklearn':
return SKGMM(nr_mixture, n_iter = nr_iteration)
return None
def random_vector(n, dim):
import random
ret = []
for j in range(n):
ret.append([random.random() for i in range(dim)])
return ret
def extend_X(X, n):
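# replicate the original samples in place so that X ends up n times its initial length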
import copy
Xt = copy.deepcopy(X)
for i in range(n - 1):
X.extend(Xt)
X = read_data('../test.data')
#X.extend(X + X + X)
#X = random_vector(100, 13)
#extend_X(X, 10)
#print(len(X))
#gmm_type = 'sklearn'
def test():
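# benchmark pygmm against sklearn's GMM on increasingly large subsets of the data and report fit times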
for nr_instance in [1000, 2000, 4000, 8000, 16000, 32000, 64000,
128000, 256000, 512000]:
for gmm_type in ['pygmm', 'sklearn']:
print(gmm_type)
gmm = get_gmm(gmm_type)
import time
start = time.time()
gmm.fit(X[:nr_instance])
print("result {} {} : {}" . format(
gmm_type, nr_instance, time.time() - start))
sys.stdout.flush()
test()
# vim: foldmethod=marker
| apache-2.0 |
e-q/scipy | doc/source/tutorial/stats/plots/kde_plot4.py | 142 | 1457 | from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
"""We use Scott's Rule, multiplied by a constant factor."""
return np.power(obj.n, -1./(obj.d+4)) * fac
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
np.random.normal(loc=loc2, scale=scale2, size=size2)])
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")
ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| bsd-3-clause |
abhipec/fnet | src/main_shimaoka.py | 1 | 12373 | """
Main code to run our experiments.
Usage:
main_shimaoka [options]
main_shimaoka -h | --help
Options:
-h, --help Print this.
--dataset=<d> Dataset.
--data_directory=<dir> Data directory.
--rnn_hidden_neurons=<size>
--keep_prob=<value>
--learning_rate=<value>
--epochs=<number>
--batch_size=<number>
--attention_size=<number>
--uid=<unique_id>
--retrain_word_embeddings
"""
import os
import sys
import json
from docopt import docopt
import numpy as np
import tensorflow as tf
#pylint: disable=no-member
import pandas as pd
#pylint: disable=import-error
import models.ShimaokaClassificationModel as model
from models.metrics import strict, loose_macro, loose_micro
import plotly
#pylint: disable=no-member
import plotly.graph_objs as go
#pylint: disable=too-many-arguments, too-many-locals
def run_one_epoch(batch,
inputs,
operations,
batch_size,
model_parameters,
session,
is_training=False):
"""
Run one full epoch over the dataset, one mini-batch at a time.
"""
batches_elapsed = 0
total_batches = model_parameters.data_size // batch_size + 1
# need to store predictions of not doing training
if not is_training:
predictions = np.empty((0, model_parameters.output_dim),
dtype=np.core.numerictypes.float32)
targets = np.empty((0, model_parameters.output_dim),
dtype=np.core.numerictypes.float32)
mentions = np.empty((0), dtype=np.object)
total_cost = []
while batches_elapsed * batch_size < model_parameters.data_size:
out = session.run(batch)
feed_dict = {}
to_feed = list(inputs.keys())
feed_dict[inputs['keep_prob']] = model_parameters.keep_prob
to_feed.remove('keep_prob')
for key in to_feed:
feed_dict[inputs[key]] = out[key]
if is_training:
cost, _ = session.run([operations['cost'], operations['optimize']], feed_dict=feed_dict)
else:
cost, scores = session.run([operations['cost'], operations['prediction']],
feed_dict=feed_dict)
predictions = np.vstack([predictions, scores])
targets = np.vstack([targets, out['labels']])
mentions = np.hstack((mentions, out['uid']))
batches_elapsed += 1
total_cost.append(cost)
sys.stdout.write('\r{} / {} : mean epoch cost = {}'.format(batches_elapsed,
total_batches,
np.mean(total_cost)))
sys.stdout.flush()
sys.stdout.write('\r')
if is_training:
return {
'cost': np.mean(total_cost)
}
else:
return {
'cost': np.mean(total_cost),
'predictions' : predictions[:model_parameters.data_size,],
'targets' : targets[:model_parameters.data_size,],
'mentions' : mentions[:model_parameters.data_size,]
}
def plot_dataframe(data_frame, filename):
"""
Plot each column of the results DataFrame as a line trace and save the figure to an HTML file.
"""
x_axis = list(range(len(data_frame)))
columns = list(data_frame.columns.values)
traces = []
for column in columns:
traces.append(go.Scatter(x=x_axis,
y=data_frame[column],
mode='lines',
name=column))
fig = go.Figure(data=traces)
plotly.offline.plot(fig, filename=filename, auto_open=False)
def log_results(results, num_to_label, log_directory, epoch):
"""
Log per-epoch metrics to a CSV file, write dev/test prediction files, and refresh the metrics plot.
"""
log_file = log_directory + '/data_log.csv'
result_file = log_directory + '/result_' + str(epoch) + '.txt'
result_file_dev = log_directory + '/result_dev_' + str(epoch) + '.txt'
image_file = log_directory + '/image'
if os.path.isfile(log_file):
data = pd.read_csv(log_file)
else:
data = pd.DataFrame(columns=('train_cost',
'dev_cost',
'test_cost',
'dev_acc',
'dev_ma_F1',
'dev_mi_F1',
'test_acc',
'test_ma_F1',
'test_mi_F1'))
current_result = []
current_result.append(results['train']['cost'])
current_result.append(results['dev']['cost'])
current_result.append(results['test']['cost'])
new_predictions = results['dev']['predictions']
# Make sure at-least one output
c_argmax = np.argmax(new_predictions, 1)[:, np.newaxis]
b_map = c_argmax == np.arange(new_predictions.shape[1])
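# b_map is True only at each row's top-scoring label, which is then forced on so every mention gets at least one type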
new_predictions[b_map] = 1
new_predictions[new_predictions > 0.5] = 1
new_predictions[new_predictions <= 0.5] = 0
current_result.extend(compute_metrics(new_predictions,
results['dev']['targets']))
with open(result_file_dev, 'w') as file_p:
for i in range(len(new_predictions)):
labels = [num_to_label[x] for x in np.where(new_predictions[i] > 0)[0]]
file_p.write(str(results['dev']['mentions'][i], 'utf-8')
+ '\t' + ','.join(labels) + '\n')
new_predictions = results['test']['predictions']
# Make sure at-least one output
c_argmax = np.argmax(new_predictions, 1)[:, np.newaxis]
b_map = c_argmax == np.arange(new_predictions.shape[1])
new_predictions[b_map] = 1
new_predictions[new_predictions > 0.5] = 1
new_predictions[new_predictions <= 0.5] = 0
current_result.extend(compute_metrics(new_predictions,
results['test']['targets']))
data.loc[len(data)] = current_result
data.to_csv(log_file, index=False)
plot_dataframe(data, image_file)
with open(result_file, 'w') as file_p:
for i in range(len(new_predictions)):
labels = [num_to_label[x] for x in np.where(new_predictions[i] > 0)[0]]
file_p.write(str(results['test']['mentions'][i], 'utf-8')
+ '\t' + ','.join(labels) + '\n')
def compute_metrics(predictions, targets):
"""
Compute metrics as required.
"""
return (strict(predictions, targets),
loose_macro(predictions, targets),
loose_micro(predictions, targets))
def one_train_dev_test_epoch(batches,
inputs,
operations,
model_parameters,
session):
"""
Run one training epoch, then evaluate on the dev and test sets.
"""
batch_size = batches['size']
# training epoch
print('Training')
results = {}
results['train'] = run_one_epoch(batches['train'],
inputs,
operations,
batch_size,
model_parameters['train'],
session,
is_training=True)
print('Development')
results['dev'] = run_one_epoch(batches['dev'],
inputs,
operations,
batch_size,
model_parameters['dev'],
session,
is_training=False)
print('Test')
results['test'] = run_one_epoch(batches['test'],
inputs,
operations,
batch_size,
model_parameters['test'],
session,
is_training=False)
return results
def load_checkpoint(checkpoint_directory,
session,
finetune=None,
finetune_directory=None):
"""
Load the latest checkpoint if one exists; otherwise optionally warm-start from a finetune directory.
"""
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# filter variables if needed.
saver_ob = tf.train.Saver(variables, max_to_keep=0)
os.makedirs(checkpoint_directory, exist_ok=True)
# verify if we don't have a checkpoint saved directly
step = 0
ckpt = tf.train.get_checkpoint_state(checkpoint_directory)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
model_checkpoint_path = ckpt.model_checkpoint_path
saver_ob.restore(session, model_checkpoint_path)
step = int(model_checkpoint_path.rsplit('-', 1)[1])
print('Model loaded = ', step)
elif finetune:
# if finetune flag is set and no checkpoint available
# load finetune model from finetune_directory
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
relevant_variables = [v for v in variables if v.name != 'embeddings/label_embeddings:0']
new_saver = tf.train.Saver(relevant_variables, max_to_keep=0)
ckpt = tf.train.get_checkpoint_state(finetune_directory)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
model_checkpoint_path = ckpt.model_checkpoint_path
new_saver.restore(session, model_checkpoint_path)
step = int(model_checkpoint_path.rsplit('-', 1)[1])
print('Finetune Model loaded = ', step)
return saver_ob, step
#pylint: disable=invalid-name
if __name__ == '__main__':
tf.reset_default_graph()
arguments = docopt(__doc__)
print(arguments)
bs = int(arguments['--batch_size'])
relevant_directory = os.path.expanduser(
arguments['--data_directory']) + arguments['--dataset'] + '/'
l_variables, parameters = model.read_local_variables_and_params(arguments)
placeholders = model.create_placeholders(parameters['train'])
ops = model.model(placeholders,
parameters['train'],
l_variables['word_embedding'],
is_training=True)
data_batches, queue_runners = model.read_batches(relevant_directory, bs)
sess = tf.Session()
# Create a coordinator, launch the queue runner threads.
coord = tf.train.Coordinator()
# start queue runners
enqueue_threads = []
for qr in queue_runners:
enqueue_threads.append(queue_runners[qr].create_threads(sess, coord=coord, start=True))
sess.run(tf.initialize_all_variables())
sess.run(tf.initialize_local_variables())
tf.train.start_queue_runners(sess=sess)
# Create a saver and session object.
ckpt_directory = os.path.join(os.path.dirname(__file__), '../', 'ckpt', arguments['--uid'])
saver, initial_step = load_checkpoint(ckpt_directory,
sess)
epochs_elapsed = initial_step
# dump parameters used to disk
with open(ckpt_directory + '/parameters.json', 'w') as json_p:
json.dump(arguments, json_p, sort_keys=True, indent=4)
summary_writer = tf.train.SummaryWriter(ckpt_directory + '/graph/', sess.graph)
try:
while not coord.should_stop():
# Run training steps
if epochs_elapsed >= int(arguments['--epochs']):
print('MAX epoch completed using ckpt model.')
coord.request_stop()
break
result = one_train_dev_test_epoch(data_batches, placeholders, ops, parameters, sess)
epochs_elapsed += 1
log_results(result, l_variables['num_to_label'], ckpt_directory, epochs_elapsed)
saver.save(sess, ckpt_directory + '/', global_step=epochs_elapsed)
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
except tf.errors.CancelledError:
print('Done training -- epoch limit reached counting checkpoints')
finally:
# When done, ask the threads to stop.
coord.request_stop()
# And wait for them to actually do it.
for threads in enqueue_threads:
coord.join(threads)
sess.close()
| mit |
evgchz/scikit-learn | sklearn/covariance/tests/test_covariance.py | 28 | 10115 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
"""Tests Covariance module on a simple dataset.
"""
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
"""Tests ShrunkCovariance module on a simple dataset.
"""
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
"""Tests LedoitWolf module on a simple dataset.
"""
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# (too) large data set
X_large = np.ones((20, 200))
assert_raises(MemoryError, ledoit_wolf, X_large, block_size=100)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_oas():
"""Tests OAS module on a simple dataset.
"""
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
### Same tests without assuming centered data
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
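# --- Illustrative sketch (not part of the upstream test-suite) ---------------
# The ShrunkCovariance tests above rely on shrunk_covariance(); the sketch below
# spells out the convex-combination formula it is assumed to implement,
# shrunk = (1 - a) * S + a * (trace(S) / p) * I, and checks it against the
# library call. The leading underscore keeps it from being collected as a
# regular test; call it manually if desired.
def _shrunk_covariance_formula_sketch(a=0.5):
    S = empirical_covariance(X)
    mu = np.trace(S) / n_features
    manual = (1. - a) * S + a * mu * np.eye(n_features)
    assert_array_almost_equal(manual, shrunk_covariance(S, shrinkage=a), 4)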
| bsd-3-clause |
FrederichRiver/neutrino | applications/saturn/saturn/rank.py | 1 | 3715 | #!/usr/bin/python3
from polaris.mysql8 import mysqlBase
from dev_global.env import VIEWER_HEADER
class data_view_base(object):
def __init__(self):
self.mysql = mysqlBase(VIEWER_HEADER)
def get_data(self):
raise NotImplementedError
def visualize(self):
raise NotImplementedError
class profit_growth_rate(data_view_base):
def get_data(self, stock_code):
from datetime import date
import pandas
import copy
result = self.mysql.condition_select(
'income_statement','report_date,float_c40',f"char_stock_code='{stock_code}'")
if not result.empty:
result.columns = ['report_date', 'net_profit']
result.set_index('report_date', inplace=True)
#print(result)
result2 = copy.deepcopy(result)
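# reported figures are cumulative within a year, so subtract the previous quarter to recover single-quarter net profit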
for index, row in result2.iterrows():
if index.month == 6:
index_key = date(index.year, 3, 31)
for col in result.columns:
try:
row[col] = row[col] - result.loc[index_key, col]
except Exception as e:
pass
elif index.month == 9:
index_key = date(index.year, 6, 30)
for col in result.columns:
try:
row[col] = row[col] - result.loc[index_key, col]
except Exception as e:
pass
elif index.month == 12:
index_key = date(index.year, 9, 30)
for col in result.columns:
try:
row[col] = row[col] - result.loc[index_key, col]
except Exception as e:
pass
#x = pandas.concat([result,result2], axis=1 )
#print(x)
result2['last_period'] = result2['net_profit'].shift(4)
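# shift(4) aligns each quarter with the same quarter one year earlier (assuming four reports per year in chronological order), so growth_rate below is year-over-year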
result2['growth_rate'] = result2['net_profit']/result2['last_period'] - 1
result2.dropna(axis=0, how='any', inplace=True)
result2.drop(['net_profit','last_period'], axis=1, inplace=True)
result2.columns = [f"{stock_code}"]
return result2
else:
return pandas.DataFrame()
def get_stock_list(self):
result = self.mysql.condition_select('stock_manager', 'stock_code', "flag='t'")
return list(result[0])
def profit_growth_analysis():
import pandas
# import xlwt
from datetime import date
from jupiter import calendar
event = profit_growth_rate()
stock_list = event.get_stock_list()
profit_data = pandas.DataFrame()
for stock in stock_list:
df = event.get_data(stock)
if not df.empty:
profit_data = pandas.concat([profit_data, df], axis=1)
result = profit_data.T
trade_period = calendar.TradeDay(2020)
result = result.loc[:,[trade_period.period(trade_period.today, 3-i) for i in range(3)]]
result.dropna(axis=0, inplace=True)
temp = result[[trade_period.period(trade_period.today, 3-i) for i in range(3)]]
result['mean'] = temp.mean(axis=1)
result = result.sort_values(by='mean',ascending=False)
result = result.applymap(lambda x: format(x, '.2%'))
result.insert(0, 'stock_name', result.index)
    for index, row in result.iterrows():
        stock_name = event.mysql.select_one('stock_manager', 'stock_name', f"stock_code='{index}'")
        # Write through .loc -- the row yielded by iterrows() is a copy.
        result.loc[index, 'stock_name'] = stock_name[0]
result.to_excel('/home/friederich/Downloads/stock_data_3.xls')
#print(result)
if __name__ == "__main__":
profit_growth_analysis()
| bsd-3-clause |
jnez71/navboxplus | demos/demo_motor.py | 1 | 6671 | #!/usr/bin/env python
"""
Using navboxplus to perfectly control a motor sensed with only a cheap encoder.
Model-augmented state is: [position, velocity, drag/inertia, b/inertia, disturbance].
"""
from __future__ import division
import numpy as np; npl = np.linalg
import matplotlib.pyplot as plt
from navboxplus import NavBoxPlus
# Motor dynamics
def motor(x, u, wf, dt):
xdot = np.array([x[1],
x[4] + x[3]*u - x[2]*x[1],
0, 0, 0]) # parameters "don't change" (we assume)
xnext = x + xdot*dt + wf
if xnext[2] < 0.5: xnext[2] = 0.5 # prevent parameter drift into nonphysical
if xnext[3] < 0.5: xnext[3] = 0.5
return xnext
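# Quick sanity check of the Euler step above (illustrative values, not from the demo):
#   motor(np.array([0, 1, 5, 2, 0]), 0, np.zeros(5), 0.05)
#   # position: 0 + 1*0.05 = 0.05
#   # velocity: xdot = 0 + 2*0 - 5*1 = -5, so 1 + (-5)*0.05 = 0.75
#   # -> array([0.05, 0.75, 5., 2., 0.])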
# Encoder model (only noise in the form of discretization)
res = 512/360 # ticks/deg
z_per_t = 20 # samples/s
def encoder(x, u, wh):
return np.floor(res*x[0])
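# Worked example of the quantization above: with res = 512/360 ~ 1.42 ticks/deg,
# a true position of 10 deg reads floor(14.22) = 14 ticks; the discarded sub-tick
# remainder is the "noise" the smoothed measurement model below has to absorb.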
# True noise characteristics
wf0_true = np.array([0, 0, 0, 0, 0])
Cf_true = np.diag([0, 0, 1E-3, 1E-6, 0])
# Our guesses at the dynamics and sensor noise characteristics
# We cannot express any perfect confidence
wf0 = np.zeros(5)
Cf = np.diag([1E-7, 1E-4, 1E-3, 1E-6, 1E-2]) # disturbance is not really constant
wh0 = 0
Ch = 1 # because the encoder discretization acts like noise
# Simulation time domain (also chooses predict frequency)
T = 40 # s
dt = 0.05 # s
t = np.arange(0, T, dt) # s
i_per_z = int(1/(z_per_t*dt)) # iters/sample
assert 1/z_per_t >= dt # time between samples >= sim timestep ?
# Desired trajectory
# r = [180, 0] * np.ones((len(t), 2)) # setpoint, not much excitation information
rv = 0.5
r = 15*np.vstack((np.sin(rv*t), rv*np.cos(rv*t))).T # sinusoid, good excitation
# Unknown external disturbance (tracked as a state)
dist = 8*np.ones_like(t); dist[:len(t)//2] = 0 # sudden push
# dist = 3*np.cos(2*rv*(t+2)) + 3 # sinusoid
# Controller with feedback and feedforward based on estimated model
ulims = (-50, 50)
gains = 5*np.array([1, 1])
feedback = 0; feedforward = 0 # for externally recording these quantities
def controller(r, rnext, x, Cx, dt):
global feedback, feedforward
feedback = gains.dot(r - x[:2])
feedforward = (1/x[3]) * ((rnext[1] - r[1])/dt + x[2]*r[1] - x[4])
return np.clip(feedback + feedforward, ulims[0], ulims[1])
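# Reading of the feedforward term above: the model gives vdot = x[4] + x[3]*u - x[2]*v,
# so solving for the u that produces the desired acceleration (rnext[1] - r[1])/dt at
# the reference velocity r[1] yields u_ff = ((rnext[1] - r[1])/dt + x[2]*r[1] - x[4]) / x[3],
# which is exactly the `feedforward` expression; `feedback` then only cancels tracking error.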
# State, estimate, covariance, measurement, and effort timeseries
x = np.zeros((len(t), 5))
xh = np.zeros((len(t), 5))
Cx = np.zeros((len(t), 5, 5))
z = np.zeros((len(t), 1))
u = np.zeros((len(t), 1))
uff = np.zeros((len(t), 1))
# Initial conditions
x[0] = [15, 0, 5, 2, dist[0]]
xh[0] = [-15, 10, 1, 1, 0]
Cx[0] = 10*np.eye(5)
u[0] = 0
uff[0] = 0
# Configure navboxplus
# (note that we will give a "smoothed" encoder model to capture its true behavior)
nav = NavBoxPlus(x0=np.copy(xh[0]),
Cx0=np.copy(Cx[0]),
g=controller,
f=motor,
hDict={'encoder': lambda x, u, wh: res*x[0] + wh},
n_r=2,
n_wf=5,
n_whDict={'encoder': 1})
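# A note on the smoothed encoder model passed above: replacing floor(res*x[0]) with
# res*x[0] + wh treats the quantization error as additive noise. An ideal uniform
# quantizer has error variance (1 tick)^2 / 12, so Ch = 1 is a deliberately
# conservative (inflated) choice rather than a finely tuned one.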
# Simulation
for i, ti in enumerate(t[1:]):
# Chose control and predict next state
try:
u[i+1] = nav.predict(r[i], r[i+1], wf0, Cf, dt)
uff[i+1] = feedforward
    except npl.LinAlgError:
print("Cholesky failed in predict!")
break
# Advance true state using control
wf = np.random.multivariate_normal(wf0_true, Cf_true)
x[i+1] = motor(x[i], u[i+1], wf, dt)
x[i+1, 4] = dist[i+1] # update disturbance
# When new measurement comes in...
if i % i_per_z == 0:
# Get new measurement from real world
z[i+1] = encoder(x[i+1], 0, 0)
# Update state estimate
try:
nav.correct('encoder', z[i+1], wh0, Ch)
        except npl.LinAlgError:
print("Cholesky failed in correct!")
break
# ...otherwise hold last measurement (for plotting only)
else:
z[i+1] = np.copy(z[i])
# Record new estimate
xh[i+1], Cx[i+1] = nav.get_state_and_cov()
# Just checkin...
if not nav.is_pdef(nav.Cx):
print("WOAH your state estimate covariance is not posdef, how'd that happen?\n")
print("Final state estimate covariance:")
print(np.round(nav.Cx, 3))
#### Plots
fig1 = plt.figure()
fig1.suptitle("Estimation and Tracking via Online UKF-Learned Model", fontsize=22)
ax1 = fig1.add_subplot(6, 1, 1)
ax1.plot(t[:i], x[:i, 0], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 0], label="estimate", color='k', ls=':', lw=3)
ax1.plot(t[:i], r[:i, 0], label="desired", color='r', ls='--')
ax1.set_xlim([0, ti])
ax1.set_ylabel("position\ndeg", fontsize=12)
ax1.legend(loc='upper right')
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 2)
ax1.plot(t[:i], x[:i, 1], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 1], label="estimate", color='k', ls=':', lw=3)
ax1.plot(t[:i], r[:i, 1], label="desired", color='r', ls='--')
ax1.set_xlim([0, ti])
ax1.set_ylabel("velocity\ndeg/s", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 3)
ax1.plot(t[:i], x[:i, 2], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 2], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("drag/inertia\n(deg/s^2)/(deg/s)", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 4)
ax1.plot(t[:i], x[:i, 3], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 3], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("b/inertia\n(deg/s^2)/V", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 5)
ax1.plot(t[:i], x[:i, 4], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 4], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("disturbance\ndeg/s^2", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 6)
ax1.plot(t[:i], u[:i], label="total", color='r', lw=3)
ax1.plot(t[:i], uff[:i], label="feedforward", color='b', ls='--', lw=2)
ax1.set_xlim([0, ti])
ax1.set_ylabel("effort\nV", fontsize=12)
ax1.set_xlabel("time\ns", fontsize=12)
ax1.legend(loc='upper right')
ax1.grid(True)
fig2 = plt.figure()
fig2.suptitle("Covariance Diagonals", fontsize=22)
ax2 = fig2.add_subplot(1, 1, 1)
dvs = np.array(map(np.diag, Cx[:i]))
for xi in xrange(len(x[0])):
ax2.plot(t[:i], dvs[:, xi], label="State {}".format(xi))
ax2.set_xlim([0, ti])
ax2.set_ylabel("value", fontsize=16)
ax2.set_xlabel("time\ns", fontsize=16)
ax2.legend(loc='upper right')
ax2.grid(True)
fig3 = plt.figure()
fig3.suptitle("Absolute Encoder Measurements", fontsize=22)
ax3 = fig3.add_subplot(1, 1, 1)
ax3.plot(t[:i], z[:i], color='b', lw=2)
ax3.set_xlim([0, ti])
ax3.set_ylabel("ticks", fontsize=16)
ax3.set_xlabel("time\ns", fontsize=16)
ax3.grid(True)
plt.show()
| mit |
jorge2703/scikit-learn | sklearn/utils/estimator_checks.py | 33 | 48331 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
    # Estimator is a class here (see the docstring), so take its name directly.
    name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
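# Minimal usage sketch (LinearSVC stands in for any estimator class to be checked):
#   from sklearn.svm import LinearSVC
#   from sklearn.utils.estimator_checks import check_estimator
#   check_estimator(LinearSVC)   # raises an assertion if an API convention is violated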
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small to reach convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
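# Worked numbers for the manual reweighting above, using the X, y defined in that check:
# n_samples = 5 and n_classes = 2, so class +1 (3 samples) gets 5 / (3 * 2) ~ 0.833 and
# class -1 (2 samples) gets 5 / (2 * 2) = 1.25 -- the same weights class_weight='balanced'
# computes internally, which is why the two coef_ arrays must match.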
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
# Estimators in mono_output_task_error raise ValueError if y is of 1-D
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
zooniverse/aggregation | experimental/paper/clustering/aggregation.py | 2 | 56968 | __author__ = 'greg'
import pymongo
import bisect
import sys
import os
import csv
import matplotlib.pyplot as plt
import urllib
import matplotlib.cbook as cbook
from collections import Iterator
import math
from scipy.stats.stats import pearsonr
import cPickle as pickle
from scipy.stats.mstats import normaltest
import warnings
import time
from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
# for Greg - which computer am I on?
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
github_directory = base_directory + "/github"
code_directory = base_directory + "/PycharmProjects"
elif os.path.exists("/Users/greg"):
base_directory = "/Users/greg"
code_directory = base_directory + "/Code"
github_directory = base_directory +"/github"
print github_directory
else:
base_directory = "/home/greg"
code_directory = base_directory + "/github"
github_directory = base_directory + "/github"
sys.path.append(github_directory+"/pyIBCC/python")
sys.path.append(code_directory+"/reduction/experimental/clusteringAlg")
import ibcc
import multiClickCorrect
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect.bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
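# Usage sketch for the bisect-based exact lookup above (hypothetical values):
#   index([1, 3, 5, 7], 5)   # -> 2
#   index([1, 3, 5, 7], 4)   # raises ValueError, 4 is not in the list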
class ClassificationTools():
def __init__(self,scale=1):
self.scale = scale
def __classification_to_markings__(self,classification):
assert False
def __list_markings__(self, classification):
print "hello"
return False
# yield 1
# return
# print classification
# marks_list = self.__classification_to_markings__(classification)
# print marks_list
# assert False
#
# for mark in marks_list:
# x = float(mark["x"])*self.scale
# y = float(mark["y"])*self.scale
#
# if not("animal" in mark):
# animal_type = None
# else:
# animal_type = mark["animal"]
#
# yield (x,y),animal_type
class ROIClassificationTools(ClassificationTools):
def __init__(self,scale=1):
# roi is the region of interest - if a point lies outside of this region, we will ignore it
# such cases should probably all be errors - a marking lies outside of the image
# or, somehow, just outside of the ROI - see penguin watch for an example
# because Penguin Watch started it, and the images for penguin watch are flipped (grrrr)
# the roi is the region ABOVE the line segments
ClassificationTools.__init__(self,scale)
self.roi_dict = {}
def __load_roi__(self,classification):
assert False
def __list_markings__(self,classification):
marks_list = self.__classification_to_markings__(classification)
roi = self.__load_roi__(classification)
for mark in marks_list:
x = float(mark["x"])*self.scale
y = float(mark["y"])*self.scale
if not("animal" in mark):
animal_type = None
else:
animal_type = mark["animal"]
#find which line segment on the roi the point lies on (x-axis wise)
for segment_index in range(len(roi)-1):
if (roi[segment_index][0] <= x) and (roi[segment_index+1][0] >= x):
rX1,rY1 = roi[segment_index]
rX2,rY2 = roi[segment_index+1]
m = (rY2-rY1)/float(rX2-rX1)
rY = m*(x-rX1)+rY1
if y >= rY:
# we have found a valid marking
# create a special type of animal None that is used when the animal type is missing
# thus, the marking will count towards not being noise but will not be used when determining the type
yield (x,y),animal_type
break
else:
break
class Aggregation:
def __init__(self, project, date, tools=None, to_skip=[],clustering_alg=None):
self.project = project
client = pymongo.MongoClient()
db = client[project+"_"+date]
self.classification_collection = db[project+"_classifications"]
self.subject_collection = db[project+"_subjects"]
self.user_collection = db[project+"_users"]
# we a global list of logged in users so we use the index for the same user over multiple images
self.all_users = []
# we need a list of of users per subject (and a separate one for just those users who were not logged in
# those ones will just have ip addresses
self.users_per_subject = {}
self.ips_per_subject = {}
# dictionaries for the raw markings per image
self.markings_list = {}
#who made what marking
self.markings_to_user = {}
#self.users_per_ = {}
# what did the user think they saw these coordinates?
# for example, in penguin watch, it could be a penguin
self.what_list = {}
self.subjects_per_user = {}
self.correct_clusters = {}
# the clustering results per image
self.clusterResults = {}
self.signal_probability = []
# this is the iteration class that goes through all of the markings associated with each classification
# for at least some of the projects the above code will work just fine
self.tools = tools
self.to_skip = to_skip
# image dimensions - used to throw silly markings not actually on the image
self.dimensions = {}
self.num_clusters = None
self.closest_neighbours = {}
self.ibcc_confusion = None
self.gold_data = {}
self.correction = multiClickCorrect.MultiClickCorrect(overlap_threshold=0)
self.expert = None
self.clustering_alg = clustering_alg
def __get_users__(self,zooniverse_id):
return self.users_per_subject[zooniverse_id]
def __readin_users__(self):
for user_record in self.user_collection.find():
if "name" in user_record:
user = user_record["name"]
bisect.insort(self.all_users,user)
def __get_completed_subjects__(self):
id_list = []
for subject in self.subject_collection.find({"state": "complete"}):
zooniverse_id = subject["zooniverse_id"]
id_list.append(zooniverse_id)
self.dimensions[zooniverse_id] = subject["metadata"]["original_size"]
return id_list
def __cluster_overlap__(self, c1, c2):
return [c for c in c1 if c in c2]
# def __find_closest_neighbour__(self,zooniverse_id,to_skip=[]):
# cluster_results = self.clusterResults[zooniverse_id]
# # if there is only one cluster NN doesn't make sense
# if len(cluster_results[0]) == 1:
# return
#
# assert zooniverse_id in self.clusterResults
# self.closest_neighbours[zooniverse_id] = []
#
#
#
# assert len(cluster_results[0]) == len(cluster_results[1])
# assert len(cluster_results[1]) == len(cluster_results[2])
#
#
#
# for i1 in range(len(cluster_results[0])):
# if i1 in to_skip:
# self.closest_neighbours[zooniverse_id].append((None,None, None, None))
# continue
# center1,pts1,users1 = cluster_results[0][i1],cluster_results[1][i1],cluster_results[2][i1]
#
# minimum_distance = float("inf")
# overlap = None
# closest_neighbour = None
# closest_index = None
# for i2 in range(len(cluster_results[0])):
# if (i1 != i2) and not(i2 in to_skip):
# try:
# center2,pts2,users2 = cluster_results[0][i2],cluster_results[1][i2],cluster_results[2][i2]
# except IndexError:
# print i2
# print len(cluster_results[0])
# print len(cluster_results[1])
# print len(cluster_results[2])
# raise
#
# dist = math.sqrt((center1[0]-center2[0])**2+(center1[1]-center2[1])**2)
# if dist < minimum_distance:
# minimum_distance = dist
# overlap = self.__cluster_overlap__(users1, users2)
# closest_neighbour = center2[:]
# closest_index = i2
#
# assert overlap is not None
# assert closest_neighbour is not None
#
# self.closest_neighbours[zooniverse_id].append((closest_index,closest_neighbour, minimum_distance, overlap))
def __plot_closest_neighbours__(self,zooniverse_id_list):
totalY = []
totalDist = []
for zooniverse_id in zooniverse_id_list:
            if zooniverse_id in self.closest_neighbours:
                pt_l, dist_l = zip(*self.closest_neighbours[zooniverse_id])
X_pts,Y_pts = zip(*pt_l)
# find to flip the image
Y_pts = [-p for p in Y_pts]
plt.plot(dist_l,Y_pts,'.',color="red")
totalDist.extend(dist_l)
totalY.extend(Y_pts)
print pearsonr(dist_l,Y_pts)
plt.show()
def __find_one__(self,zooniverse_id_list):
# for zooniverse_id in zooniverse_id_list:
# if zooniverse_id in self.closet_neighbours:
# self.__display_image__(zooniverse_id)
# for cluster_index in range(len(self.clusterResults[zooniverse_id][0])):
# center = self.clusterResults[zooniverse_id][0][cluster_index]
# nearest_neigbhour = self.closet_neighbours[zooniverse_id][cluster_index][0]
# overlap = self.closet_neighbours[zooniverse_id][cluster_index][2]
#
# plt.plot([center[0],], [center[1],],'o',color="blue")
# if overlap == 1:
# plt.plot([center[0],nearest_neigbhour[0]],[center[1],nearest_neigbhour[1]],color="red")
#
# plt.show()
# return
# sort of barnes interpolation
totalY = []
totalDist = []
scale = 1
Ypixel_range = np.arange(0,1,0.005)
dist_range = np.arange(0,scale,0.01)
X,Y = np.meshgrid(Ypixel_range,dist_range)
Z = []
#convert into one big list
for zooniverse_id in zooniverse_id_list:
            if zooniverse_id in self.closest_neighbours:
                cluster_centers = self.clusterResults[zooniverse_id][0]
                X_pts, Y_pts = zip(*cluster_centers)
                distance_to_nn = zip(*self.closest_neighbours[zooniverse_id])[1]
totalDist.extend(distance_to_nn)
totalY.extend(Y_pts)
#scale
minY,maxY = min(totalY),max(totalY)
minD,maxD = min(totalDist),max(totalDist)
totalY = [scale*(y-minY)/(maxY-minY) for y in totalY]
totalDist = [scale*(d-minD)/(maxD-minD) for d in totalDist]
# now search for all of the clusters whose neighbour has one or zero users in common
# convert into one big list
to_show = []
for zooniverse_id in zooniverse_id_list:
            if zooniverse_id in self.closest_neighbours:
                for cluster_index, center in enumerate(self.clusterResults[zooniverse_id][0]):
                    y_pixel = center[1]
                    # find out about the nearest neighbour
                    neighbour, dist, overlap_l = self.closest_neighbours[zooniverse_id][cluster_index]
if len(overlap_l) == 1:
#normalize the y pixel height
y_pixel = scale*(y_pixel-minY)/(maxY-minY)
#normalize the distance
dist = scale*(dist-minD)/(maxD-minD)
z = sum([math.exp(-(Y-y_pixel)**2) for Y,d in zip(totalY,totalDist) if d <= dist])
z_max = sum([math.exp(-(Y-y_pixel)**2) for Y,d in zip(totalY,totalDist) if d <= scale])
to_show.append((z/z_max,zooniverse_id,center[:],neighbour[:],overlap_l[:]))
to_show.sort(key = lambda x:x[0])
shownPts = []
for p,zooniverse_id,pt1,pt2,overlap in to_show:
if (pt1 in shownPts) or (pt2 in shownPts):
continue
print overlap
shownPts.append(pt1)
shownPts.append(pt2)
self.__display_image__(zooniverse_id)
#plt.plot([pt1[0],pt2[0]],[pt1[1],pt2[1]],"o-",color="blue")
#plt.plot([pt1[0],],[pt1[1],],"o",color="red")
plt.plot([0,pt1[0]],[0,pt1[1]])
plt.plot([1000,pt2[0]],[0,pt2[1]])
plt.show()
def __get_image_fname__(self,zooniverse_id):
"""
get the path to JPG for this image - also download the image if necessary
:param zooniverse_id:
:return:
"""
subject = self.subject_collection.find_one({"zooniverse_id": zooniverse_id})
url = subject["location"]["standard"]
slash_index = url.rfind("/")
object_id = url[slash_index+1:]
#print object_id
if not(os.path.isfile(base_directory+"/Databases/"+self.project+"/images/"+object_id)):
urllib.urlretrieve(url, base_directory+"/Databases/"+self.project+"/images/"+object_id)
fname = base_directory+"/Databases/"+self.project+"/images/"+object_id
return fname
def __display_image__(self,zooniverse_id):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fname = self.__get_image_fname__(zooniverse_id)
image_file = cbook.get_sample_data(fname)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
def __barnes_interpolation__(self,zooniverse_id_list):
# sort of barnes interpolation
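        # each cluster contributes a Gaussian weight exp(-(Y - y_pixel)**2) based on its
        # (normalized) y-pixel height; further down, z/z_max is the fraction of that total
        # weight coming from clusters whose nearest-neighbour distance is <= dist, i.e.
        # roughly a percentile of the distance distribution at a given image height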
totalY = []
totalDist = []
scale = 1
Ypixel_range = np.arange(0,1,0.005)
dist_range = np.arange(0,scale,0.01)
X,Y = np.meshgrid(Ypixel_range,dist_range)
Z = []
# convert into one big list
for zooniverse_id in zooniverse_id_list:
if zooniverse_id in self.closet_neighbours:
closest_neighbours = self.closet_neighbours[zooniverse_id]
pt_l,pt_2,dist_l,overlap_size_l = zip(*closest_neighbours)
X_pts,Y_pts = zip(*pt_l)
totalDist.extend(dist_l)
totalY.extend(Y_pts)
#scale
minY,maxY = min(totalY),max(totalY)
minD,maxD = min(totalDist),max(totalDist)
totalY = [scale*(y-minY)/(maxY-minY) for y in totalY]
totalDist = [scale*(d-minD)/(maxD-minD) for d in totalDist]
        # now search for all of the clusters whose neighbour has one or zero users in common
# convert into one big list
P = []
for zooniverse_id in zooniverse_id_list:
if zooniverse_id in self.closet_neighbours:
closest_neighbours = self.closet_neighbours[zooniverse_id]
pt_l,pt_2,dist_l,overlap_size_l = zip(*closest_neighbours)
for pts,dist,overlap_size in zip(pt_l,dist_l,overlap_size_l):
if overlap_size == 1:
y_pixel = scale*(pts[1]-minY)/(maxY-minY)
dist = scale*(dist-minD)/(maxD-minD)
z = sum([math.exp(-(Y-y_pixel)**2) for Y,d in zip(totalY,totalDist) if d <= dist])
z_max = sum([math.exp(-(Y-y_pixel)**2) for Y,d in zip(totalY,totalDist) if d <= 1])
P.append(z/z_max)
print len(P)
plt.hist(P,bins=20,normed=1,cumulative=True)
plt.xlabel("Percentile for Clusters with |NN|=1")
plt.show()
# y_pixel = 0.1
# Z = []
# for dist in dist_range:
# Z.append(sum([math.exp(-(Y-y_pixel)**2) for Y,d in zip(totalY,totalDist) if d <= dist]))
#
# Z = [z/max(Z) for z in Z]
# plt.plot(dist_range,Z)
#
# y_pixel = 0.9
# Z = []
# for dist in dist_range:
# Z.append(sum([math.exp(-(Y-y_pixel)**2) for Y,d in zip(totalY,totalDist) if d <= dist]))
#
# Z = [z/max(Z) for z in Z]
# plt.plot(dist_range,Z)
# plt.xlim((0,0.2))
# plt.title("CDF of Nearest Neighbour Distance")
# plt.legend(("Upper row", "Lower row"), "lower right")
# plt.show()
def __plot_cluster_size__(self,zooniverse_id_list):
data = {}
for zooniverse_id in zooniverse_id_list:
if self.clusterResults[zooniverse_id] is not None:
centers,pts,users = self.clusterResults[zooniverse_id]
Y = [700-c[1] for c in centers]
X = [len(p) for p in pts]
plt.plot(X,Y,'.',color="blue")
for x,y in zip(X,Y):
if not(x in data):
data[x] = [y]
else:
data[x].append(y)
print pearsonr(X,Y)
X = sorted(data.keys())
Y = [np.mean(data[x]) for x in X]
plt.plot(X,Y,'o-')
plt.xlabel("Cluster Size")
plt.ylabel("Height in Y-Pixels")
plt.show()
def __cluster_subject__(self,zooniverse_id,clustering_alg=None,correction_alg=None,fix_distinct_clusters = False):
if clustering_alg is None:
clustering_alg = self.clustering_alg
assert clustering_alg is not None
assert zooniverse_id in self.markings_list
if self.markings_list[zooniverse_id] != []:
# cluster results will be a 3-tuple containing a list of the cluster centers, a list of the points in each
# cluster and a list of the users who marked each point
fname = self.__get_image_fname__(zooniverse_id)
self.clusterResults[zooniverse_id],time_to_cluster = clustering_alg(self.markings_list[zooniverse_id],self.markings_to_user[zooniverse_id])
self.num_clusters = len(zip(*self.clusterResults[zooniverse_id]))
assert type(self.num_clusters) == int
# make sure we got a 3 tuple and since there was a least one marking, we should have at least one cluster
# pruning will come later
if not(len(self.clusterResults[zooniverse_id]) == 3):
print self.clusterResults[zooniverse_id]
assert len(self.clusterResults[zooniverse_id]) == 3
assert self.clusterResults[zooniverse_id][0] != []
# for cluster in self.clusterResults[zooniverse_id][1]:
# if len(cluster) >= 10:
# X,Y = zip(*cluster)
# print "**"
# print normaltest(X)
# print normaltest(Y)
# print "-"
# fix the cluster if desired
if fix_distinct_clusters:
self.clusterResults[zooniverse_id] = self.correction.__fix__(self.clusterResults[zooniverse_id])
self.num_clusters = len(self.clusterResults[zooniverse_id][0])
assert type(self.num_clusters) == int
if correction_alg is not None:
self.clusterResults[zooniverse_id] = correction_alg(self.clusterResults[zooniverse_id])
self.num_clusters = len(self.clusterResults[zooniverse_id][0])
assert type(self.num_clusters) == int
else:
self.clusterResults[zooniverse_id] = [],[],[]
self.num_clusters = 0
time_to_cluster = 0
#print self.clusterResults
return len(self.clusterResults[zooniverse_id][0]),time_to_cluster
def __signal_ibcc__(self):
self.__readin_users__()
# run ibcc on each cluster to determine if it is a signal (an actual animal) or just noise
# run ibcc on all of the subjects that have been processed (read in and clustered) so far
# each cluster needs to have a universal index
cluster_count = -1
# need to give the ip addresses unique indices, so update ip_index after every subject
ip_index = 0
# needed for determining priors for IBCC
real_animals = 0
fake_animals = 0
true_pos = 0
false_neg = 0
false_pos = 0
true_neg = 0
# intermediate holder variable
# because ibcc needs indices to be nice and ordered with no gaps, we have to make two passes through the data
to_ibcc = []
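        # each entry of to_ibcc is a 4-tuple: (user name, user index - negative for
        # ip-only users, global cluster index, 1 if the user marked this cluster else 0);
        # these later become the rows of the IBCC input csv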
self.global_index_list = {}
for zooniverse_id in self.clusterResults:
self.global_index_list[zooniverse_id] = []
if self.clusterResults[zooniverse_id] is None:
continue
for cluster_center,cluster_markings,user_per_cluster in zip(*self.clusterResults[zooniverse_id]):
# moving on to the next animal so increase counter
# universal counter over all images
cluster_count += 1
# needed for determining priors for IBCC
pos = 0
neg = 0
self.global_index_list[zooniverse_id].append(cluster_count) #(cluster_count,cluster_center[:]))
# check whether or not each user marked this cluster
for u in self.users_per_subject[zooniverse_id]:
# was this user logged in or not?
# if not, their user name will be an ip address
try:
i = self.ips_per_subject[zooniverse_id].index(u) + ip_index
if u in user_per_cluster:
to_ibcc.append((u,-i,cluster_count,1))
pos += 1
else:
to_ibcc.append((u,-i,cluster_count,0))
neg += 1
                    # if a ValueError was thrown, the user name was not in the list of ip addresses
# and therefore, the user name was not an ip address, which means the user was logged in
except ValueError as e:
if u in user_per_cluster:
to_ibcc.append((u,index(self.all_users,u),cluster_count,1))
pos += 1
else:
to_ibcc.append((u,index(self.all_users,u),cluster_count,0))
neg += 1
if pos > neg:
real_animals += 1
true_pos += pos/float(pos+neg)
false_neg += neg/float(pos+neg)
else:
fake_animals += 1
false_pos += pos/float(pos+neg)
true_neg += neg/float(pos+neg)
ip_index += len(self.ips_per_subject[zooniverse_id])
# now run through again - this will make sure that all of the indices are ordered with no gaps
# since the user list is created by reading through all the users, even those which haven't annotated
# of the specific images we are currently looking at
ibcc_user_list = []
self.ibcc_users = {}
for user,user_index,animal_index,found in to_ibcc:
# can't use bisect or the indices will be out of order
if not(user_index in ibcc_user_list):
ibcc_user_list.append(user_index)
self.ibcc_users[user] = len(ibcc_user_list)-1
# write out the input file for IBCC
with open(base_directory+"/Databases/"+self.project+"_ibcc.csv","wb") as f:
f.write("a,b,c\n")
for user,user_index,animal_index,found in to_ibcc:
i = ibcc_user_list.index(user_index)
f.write(str(i)+","+str(animal_index)+","+str(found)+"\n")
# create the prior estimate and the default confusion matrix
prior = real_animals/float(real_animals + fake_animals)
confusion = [[max(int(true_neg),1),max(int(false_pos),1)],[max(int(false_neg),1),max(int(true_pos),1)]]
# create the config file
print "this is here"
print base_directory+"/Databases/"+self.project+"_ibcc.py"
with open(base_directory+"/Databases/"+self.project+"_ibcc.py","wb") as f:
f.write("import numpy as np\n")
f.write("scores = np.array([0,1])\n")
f.write("nScores = len(scores)\n")
f.write("nClasses = 2\n")
f.write("inputFile = \""+base_directory+"/Databases/"+self.project+"_ibcc.csv\"\n")
f.write("outputFile = \""+base_directory+"/Databases/"+self.project+"_signal.out\"\n")
f.write("confMatFile = \""+base_directory+"/Databases/"+self.project+"_ibcc.mat\"\n")
f.write("nu0 = np.array(["+str(max(int((1-prior)*100),1))+","+str(max(int(prior*100),1))+"])\n")
f.write("alpha0 = np.array("+str(confusion)+")\n")
# start by removing all temp files
try:
os.remove(base_directory+"/Databases/"+self.project+"_signal.out")
except OSError:
pass
try:
os.remove(base_directory+"/Databases/"+self.project+"_ibcc.mat")
except OSError:
pass
try:
os.remove(base_directory+"/Databases/"+self.project+"_ibcc.csv.dat")
except OSError:
pass
# pickle.dump((big_subjectList,big_userList),open(base_directory+"/Databases/tempOut.pickle","wb"))
ibcc.runIbcc(base_directory+"/Databases/"+self.project+"_ibcc.py")
def __calc_correct_markings__(self,zooniverse_id):
# return the local indices of the correct markings
correct_pts = []
cluster_centers = self.clusterResults[zooniverse_id][0]
gold_pts = self.gold_data[zooniverse_id]
# if either of these sets are empty, then by def'n we can't have any correct WRT this image
if (cluster_centers == []) or (gold_pts == []):
return correct_pts
userToGold = [[] for i in range(len(gold_pts))]
goldToUser = [[] for i in range(len(cluster_centers))]
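        # userToGold[g] collects the user clusters whose nearest gold marking is g;
        # goldToUser[c] collects the gold markings whose nearest user cluster is c -
        # a cluster counts as correct when at least one gold marking maps to it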
# find which gold standard pts, the user cluster pts are closest to
for local_index, (x,y) in enumerate(cluster_centers):
dist = [math.sqrt((float(pt["x"])-x)**2+(float(pt["y"])-y)**2) for pt in gold_pts]
userToGold[dist.index(min(dist))].append(local_index)
# find out which user pts each gold standard pt is closest to
for gold_index, pt in enumerate(gold_pts):
dist = [math.sqrt((float(pt["x"])-x)**2+(float(pt["y"])-y)**2) for (x,y) in cluster_centers]
goldToUser[dist.index(min(dist))].append(gold_index)
for local_index in range(len(cluster_centers)):
if len(goldToUser[local_index]) >= 1:
correct_pts.append(local_index)
return correct_pts
def __display_false_positives__(self):
for zooniverse_id in self.clusterResults:
if self.clusterResults[zooniverse_id] is None:
continue
correct_pts = self.__calc_correct_markings__(zooniverse_id)
cluster_centers = self.clusterResults[zooniverse_id][0]
cluster_pts = self.clusterResults[zooniverse_id][1]
if len(correct_pts) != len(cluster_centers):
self.__display_image__(zooniverse_id)
for index,(x,y) in enumerate(cluster_centers):
if index in correct_pts:
plt.plot(x,y,'o',color="red")
else:
plt.plot(x,y,'o',color="blue")
for pts in cluster_pts:
for (x,y) in pts:
plt.plot(x,y,'.',color="yellow")
plt.show()
def __roc__(self,plot=False):
correct_pts = []
#print self.global_index_list
# use the gold standard data to determine which of our points is correct
for zooniverse_id, global_indices in self.global_index_list.items():
gold_pts = self.gold_data[zooniverse_id]
#print gold_pts
# if either of these sets are empty, then by def'n we can't have any correct WRT this image
if (global_indices == []) or (gold_pts == []):
continue
userToGold = [[] for i in range(len(gold_pts))]
goldToUser = [[] for i in range(len(global_indices))]
#global_cluster_indices,cluster_centers = zip(*user_pts)
cluster_centers = self.clusterResults[zooniverse_id][0]
# find which gold standard pts, the user cluster pts are closest to
for local_index, (x,y) in enumerate(cluster_centers):
dist = [math.sqrt((float(pt["x"])-x)**2+(float(pt["y"])-y)**2) for pt in gold_pts]
userToGold[dist.index(min(dist))].append(local_index)
# find out which user pts each gold standard pt is closest to
for gold_index, pt in enumerate(gold_pts):
dist = [math.sqrt((float(pt["x"])-x)**2+(float(pt["y"])-y)**2) for (x,y) in cluster_centers]
goldToUser[dist.index(min(dist))].append(gold_index)
for local_index,global_index in zip(range(len(cluster_centers)),global_indices):
# which gold pt did this user pt map to?
# gold_index = [i for i,pts in enumerate(userToGold) if local_index in pts][0]
# print [i for i,pts in enumerate(userToGold) if local_index in pts]
# did this gold pt also map to the user pt?
# look at all of the
# for g in gold:
# if g in goldToUser[local_index]:
# correct_pts.append(global_index)
# break
if len(goldToUser[local_index]) >= 1:
correct_pts.append(global_index)
truePos = []
falsePos = []
with open(base_directory+"/Databases/"+self.project+"_signal.out","rb") as f:
for ii,l in enumerate(f.readlines()):
a,b,prob = l[:-1].split(" ")
if ii in correct_pts:
truePos.append(float(prob))
else:
falsePos.append(float(prob))
alphas = truePos[:]
alphas.extend(falsePos)
alphas.sort()
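        # sweep every returned IBCC probability as a threshold: for each value a, count
        # how many false positives and true positives have probability >= a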
X = []
Y = []
for a in alphas:
X.append(len([x for x in falsePos if x >= a]))
Y.append(len([y for y in truePos if y >= a]))
if plot:
plt.plot(X,Y)
plt.xlabel("False Positive Count")
plt.ylabel("True Positive Count")
plt.show()
return X,Y
def __outliers__(self,zooniverse_id):
for (Cx,Cy), markings in zip(self.clusterResults[zooniverse_id][0],self.clusterResults[zooniverse_id][1]):
distances = []
if len(markings) == 1:
continue
for (x,y) in markings:
dist = math.sqrt((x-Cx)**2+(y-Cy)**2)
distances.append(dist)
ratio = max(distances)/np.mean(distances)
if ratio > 4:
print ratio
self.__display_image__(zooniverse_id)
X,Y = zip(*markings)
plt.plot(X,Y,'.')
plt.show()
def __process_signal__(self):
self.signal_probability = []
with open(base_directory+"/Databases/"+self.project+"_signal.out","rb") as f:
results = csv.reader(f, delimiter=' ')
for row in results:
self.signal_probability.append(float(row[2]))
def __get_subjects_per_site__(self,zooniverse_id):
pass
def __display__markings__(self, zooniverse_id):
assert zooniverse_id in self.clusterResults
subject = self.subject_collection.find_one({"zooniverse_id": zooniverse_id})
zooniverse_id = subject["zooniverse_id"]
print zooniverse_id
url = subject["location"]["standard"]
slash_index = url.rfind("/")
object_id = url[slash_index+1:]
if not(os.path.isfile(base_directory+"/Databases/"+self.project+"/images/"+object_id)):
urllib.urlretrieve(url, base_directory+"/Databases/"+self.project+"/images/"+object_id)
image_file = cbook.get_sample_data(base_directory+"/Databases/"+self.project+"/images/"+object_id)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
for (x, y), pts, users in zip(*self.clusterResults[zooniverse_id]):
plt.plot([x, ], [y, ], 'o', color="red")
plt.show()
plt.close()
def __display_raw_markings__(self,zooniverse_id):
self.__display_image__(zooniverse_id)
print "Num users: " + str(len(self.users_per_subject[zooniverse_id]))
X,Y = zip(*self.markings_list[zooniverse_id])
plt.plot(X,Y,'.')
plt.xlim((0,1000))
plt.ylim((563,0))
plt.show()
plt.close()
def __save_raw_markings__(self,zooniverse_id):
self.__display_image__(zooniverse_id)
print "Num users: " + str(len(self.users_per_subject[zooniverse_id]))
X,Y = zip(*self.markings_list[zooniverse_id])
plt.plot(X,Y,'.')
plt.xlim((0,1000))
plt.ylim((563,0))
plt.xticks([])
plt.yticks([])
plt.savefig(base_directory+"/Databases/"+self.project+"/examples/"+zooniverse_id+".pdf",bbox_inches='tight')
plt.close()
# def __display_image__(self,zooniverse_id):
# #assert zooniverse_id in self.clusterResults
# subject = self.subject_collection.find_one({"zooniverse_id": zooniverse_id})
# zooniverse_id = subject["zooniverse_id"]
# #print zooniverse_id
# url = subject["location"]["standard"]
#
# slash_index = url.rfind("/")
# object_id = url[slash_index+1:]
#
# if not(os.path.isfile(base_directory+"/Databases/"+self.project+"/images/"+object_id)):
# urllib.urlretrieve(url, base_directory+"/Databases/"+self.project+"/images/"+object_id)
#
# image_file = cbook.get_sample_data(base_directory+"/Databases/"+self.project+"/images/"+object_id)
# image = plt.imread(image_file)
#
# fig, ax = plt.subplots()
# im = ax.imshow(image)
# plt.xlim((0,1000))
# plt.ylim((563,0))
def __soy_it__(self,zooniverse_id):
self.__display_image__(zooniverse_id)
gold_markings = self.gold_data[zooniverse_id]
# start by matching each of the user output images to the gold standard
userToGold = [[] for i in range(len(gold_markings))]
goldToUser = [[] for i in range(len(zip(*self.clusterResults[zooniverse_id])))]
#print len(gold_markings)
#print len(zip(*self.clusterResults[zooniverse_id]))
for marking_index,((x, y), pts, users) in enumerate(zip(*self.clusterResults[zooniverse_id])):
dist = [math.sqrt((pt["x"]-x)**2+(pt["y"]-y)**2) for pt in gold_markings]
userToGold[dist.index(min(dist))].append(marking_index)
for gold_index, pt in enumerate(gold_markings):
dist = [math.sqrt((pt["x"]-x)**2+(pt["y"]-y)**2) for (x,y),pts,users in zip(*self.clusterResults[zooniverse_id])]
goldToUser[dist.index(min(dist))].append(gold_index)
for marking_index, pt in enumerate(gold_markings):
if len(userToGold[marking_index]) == 0:
plt.plot(pt["x"], pt["y"], 'o', color="red")
elif len(userToGold[marking_index]) > 1:
plt.plot(pt["x"], pt["y"], 'o', color="blue")
for marking_index,((x, y), pts, users) in enumerate(zip(*self.clusterResults[zooniverse_id])):
if len(goldToUser[marking_index]) == 1:
plt.plot([x, ], [y, ], 'o', color="yellow")
elif len(goldToUser[marking_index]) == 0:
plt.plot([x, ], [y, ], 'o', color="grey")
else:
plt.plot([x, ], [y, ], 'o', color="green")
# print "===---"
# for index in goldToUser[marking_index]:
# print gold_markings[index]
# for marking_index,((x, y), pts, users) in enumerate(zip(*self.clusterResults[zooniverse_id])):
# #find the match
# for gold_index, (x2,y2) in enumerate(gold_markings):
# if (marking_index in userToGold[gold_index]) and (gold_index in goldToUser[marking_index]):
# plt.plot((x,x2),(y,y2),"-",color="blue")
# elif marking_index in userToGold[gold_index]:
# plt.plot((x,x2),(y,y2),"-",color="green")
# elif gold_index in goldToUser[marking_index]:
# plt.plot((x,x2),(y,y2),"-",color="red")
#for (x, y) in gold_markings:
# plt.plot([x, ], [y, ], 'o', color="red")
#ROI = [(0, 1050),(0, 370),(1920, 370),(1920, 1050)]
#print zip(*ROI)
#X = [x/1.92 for x,y in ROI]
#Y = [y/1.92 for x,y in ROI]
#print X,Y
#plt.plot(X,Y,"o-",color="red")
plt.show()
def __display_signal_noise(self):
for ii,zooniverse_id in enumerate(self.clusterResults):
print zooniverse_id
subject = self.subject_collection.find_one({"zooniverse_id":zooniverse_id})
zooniverse_id = subject["zooniverse_id"]
url = subject["location"]["standard"]
slash_index = url.rfind("/")
object_id = url[slash_index+1:]
if not(os.path.isfile(base_directory+"/Databases/condors/images/"+object_id)):
urllib.urlretrieve (url, base_directory+"/Databases/condors/images/"+object_id)
image_file = cbook.get_sample_data(base_directory+"/Databases/condors/images/"+object_id)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
for center,animal_index,users_l,user_count in results_dict[zooniverse_id]:
if ibcc_v[animal_index] >= 0.5:
print center[0],center[1],1
plt.plot([center[0],],[center[1],],'o',color="green")
else:
print center[0],center[1],0
plt.plot([center[0],],[center[1],],'o',color="red")
plt.show()
plt.close()
def __load_dimensions__(self,zooniverse_id,subject=None):
pass
def __get_status__(self,zooniverse_id):
return self.subject_collection.find_one({"zooniverse_id":zooniverse_id})["state"]
def __load_roi__(self,zooniverse_id):
assert False
def __load_gold_standard__(self,zooniverse_id):
# have we already encountered this subject?
if os.path.isfile(base_directory+"/Databases/"+self.project+"/"+zooniverse_id+"_gold.pickle"):
self.gold_data[zooniverse_id] = pickle.load(open(base_directory+"/Databases/"+self.project+"/"+zooniverse_id+"_gold.pickle","rb"))
else:
self.gold_data[zooniverse_id] = []
classification = self.classification_collection.find_one({"subjects.zooniverse_id":zooniverse_id,"user_name":self.expert})
for pt, animal_type in self.tools.__list_markings__(classification):
marking = {"x":pt[0],"y":pt[1]}
self.gold_data[zooniverse_id].append(marking)
pickle.dump(self.gold_data[zooniverse_id],open(base_directory+"/Databases/"+self.project+"/"+zooniverse_id+"_gold.pickle","wb"))
def __accuracy__(self,zooniverse_id):
"""
Calculate the accuracy for the given zooniverse_id, clustering needs to have already been done
and gold standard data needs to have already been read in
:param zooniverse_id:
:return:
"""
gold_markings = self.gold_data[zooniverse_id]
userToGold = [[] for i in range(len(gold_markings))]
goldToUser = [[] for i in range(len(zip(*self.clusterResults[zooniverse_id])))]
for marking_index,((x, y), pts, users) in enumerate(zip(*self.clusterResults[zooniverse_id])):
try:
dist = [math.sqrt((gold["x"]-x)**2+(gold["y"]-y)**2) for gold in gold_markings]
userToGold[dist.index(min(dist))].append(marking_index)
except ValueError:
#print zooniverse_id
#print gold_markings
#print self.clusterResults[zooniverse_id]
print "Empty gold standard: " + zooniverse_id
return 0
for gold_index, gold in enumerate(gold_markings):
try:
dist = [math.sqrt((gold["x"]-x)**2+(gold["y"]-y)**2) for (x,y),pts,users in zip(*self.clusterResults[zooniverse_id])]
goldToUser[dist.index(min(dist))].append(gold_index)
except ValueError:
print "Empty user clusters: " + zooniverse_id
return 0
num_match = len([x for x in userToGold if len(x) > 0])
return num_match
#print userToGold
lower_bound = num_match/float(len(userToGold))
additional = len([x for x in goldToUser if len(x) == 0])
could_have_found = 0
missed = 0
#which penguins have we missed?
missedPenguins = [i for i in range(len(gold_markings)) if userToGold[i] == []]
# find which are the closest penguins we found which correspond to these penguins
for i in missedPenguins:
for j in range(len(goldToUser)):
try:
goldToUser[j].index(i)
break
except ValueError:
pass
goldToUser[j].index(i)
#which gold penguin did j map to?
for ii in range(len(gold_markings)):
try:
userToGold[ii].index(j)
break
except ValueError:
pass
userToGold[ii].index(j)
x_i,y_i = gold_markings[i]
x_ii,y_ii = gold_markings[ii]
pts = zip(*self.clusterResults[zooniverse_id])[j][1]
# are these points closer to i or ii?
dist_i = [math.sqrt((x-x_i)**2+(y-y_i)**2) for (x,y) in pts]
dist_ii = [math.sqrt((x-x_ii)**2+(y-y_ii)**2) for (x,y) in pts]
close_to_i = sum([1 for (di,dii) in zip(dist_i,dist_ii) if di < dii])
missed += 1
if close_to_i > 0:
could_have_found += 1
#print "** " +str((close_to_ii,len(pts)))
#print (additional,len(userToGold))
#print "\t"+str((lower_bound,(num_match+additional)/float(len(userToGold)+additional)))
return lower_bound,could_have_found,missed
def __find_correct__(self,zooniverse_id,gold_markings):
# find all one to one mappings, in the case where we have multiple markings, take the closest to the
# gold standard
goldToUser = [[] for i in range(len(zip(*self.clusterResults[zooniverse_id])))]
for gold_index, (Gx,Gy) in enumerate(gold_markings):
dist = [math.sqrt((Gx-x)**2+(Gy-y)**2) for (x,y),pts,users in zip(*self.clusterResults[zooniverse_id])]
goldToUser[dist.index(min(dist))].append(gold_index)
self.correct_clusters[zooniverse_id] = [i for i in range(len(zip(*self.clusterResults[zooniverse_id]))) if goldToUser[i] != []]
return [i for i in range(len(zip(*self.clusterResults[zooniverse_id]))) if goldToUser[i] != []]
def __user_accuracy__(self,user_id):
r = []
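        # for each image this user annotated, take the fraction of the correct clusters
        # that they marked, then average those fractions over all such images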
for zooniverse_id,clusters_index_list in self.correct_clusters.items():
if not(user_id in self.users_per_subject[zooniverse_id]):
continue
clusters = zip(*self.clusterResults[zooniverse_id])
#how many of these clusters did this user mark?
accuracy_count = 0
for i in clusters_index_list:
if user_id in clusters[i][2]:
accuracy_count += 1
r.append(accuracy_count/float(len(clusters_index_list)))
return np.mean(r)
def __relative_sizes__(self,zooniverse_id,gold_markings):
correct = self.__find_correct__(zooniverse_id,gold_markings)
not_correct = [i for i in range(len(zip(*self.clusterResults[zooniverse_id]))) if not(i in correct)]
X = []
Y = []
self.__find_closest_neighbour__(zooniverse_id)
for i in range(self.num_clusters):
if i in not_correct:
if len(self.closest_neighbours[zooniverse_id][i][3]) == 1:
j = self.closest_neighbours[zooniverse_id][i][0]
size_i = len(zip(*self.clusterResults[zooniverse_id])[i][1])
size_j = len(zip(*self.clusterResults[zooniverse_id])[j][1])
X.append(max(size_i,size_j))
Y.append(min(size_i,size_j))
return X,Y
def __overlap__(self,zooniverse_id,gold_markings):
correct = self.__find_correct__(zooniverse_id,gold_markings)
not_correct = [i for i in range(len(zip(*self.clusterResults[zooniverse_id]))) if not(i in correct)]
to_return = []
to_return2 = []
self.__find_closest_neighbour__(zooniverse_id)
for i in range(self.num_clusters):
if (i in not_correct) and (len(self.closest_neighbours[zooniverse_id][i][3]) == 1):
to_return2.extend(self.closest_neighbours[zooniverse_id][i][3])
return to_return2
def __off_by_one__(self,display=False):
# return the global indices of all clusters which have only one user in common (obviously from the same image)
# and the image ID and the local indices - so we can tell which users saw
one_overlap = []
for zooniverse_id in self.clusterResults:
one = self.__find_closest__(zooniverse_id,user_threshold=1)
# convert from local to global indices
global_indices = self.global_index_list[zooniverse_id]
for (i,j,overlap) in one:
# we should have already dealt with clusters where the overlap is zero
assert len(overlap) == 1
global_i = global_indices[i]
global_j = global_indices[j]
one_overlap.append((zooniverse_id,global_i,global_j,i,j))
if display:
self.__display_image__(zooniverse_id)
x1,y1 = self.clusterResults[zooniverse_id][0][i]
plt.plot([x1],[y1],"o",color="red")
x2,y2 = self.clusterResults[zooniverse_id][0][j]
plt.plot([x2],[y2],".",color="blue")
print len(self.clusterResults[zooniverse_id][1][i]),len(self.clusterResults[zooniverse_id][1][j])
plt.show()
return one_overlap
def __relative_confusion__(self,t):
# what is the relative probability that we have two distinct clusters as opposed to one?
if self.ibcc_confusion is None:
self.ibcc_confusion = []
with open("/Users/greg/Databases/penguin_ibcc.mat","rb") as f:
for l in f.readlines():
probs = l[:-1].split(" ")
probs = [float(p) for p in probs]
self.ibcc_confusion.append(probs[:])
zooniverse_id = t[0]
i = t[3]
j = t[4]
all_users = self.users_per_subject[zooniverse_id]
users_i = self.clusterResults[zooniverse_id][2][i]
users_j = self.clusterResults[zooniverse_id][2][j]
ibcc_user_indices = [self.ibcc_users[u] for u in all_users]
# 0 or 1 for whether or not each user tagged the first cluster
annotated_i = [1 if u in users_i else 0 for u in all_users]
annotated_j = [1 if u in users_j else 0 for u in all_users]
# what is "probability"
prob_i = 1
for index, annotated in zip(ibcc_user_indices,annotated_i):
prob_i = prob_i * self.ibcc_confusion[index][2+annotated]
prob_j = 1
for index, annotated in zip(ibcc_user_indices,annotated_j):
prob_j = prob_j * self.ibcc_confusion[index][2+annotated]
prob_ij = 1
for index, a1,a2 in zip(ibcc_user_indices,annotated_i,annotated_j):
prob_ij = prob_ij * self.ibcc_confusion[index][2+max(a1,a2)]
print sum(annotated_i),sum(annotated_j)
print prob_ij/(prob_i*prob_j)
# def __off_by_one__(self,zooniverse_id,gold_markings):
# correct = self.__find_correct__(zooniverse_id,gold_markings)
# not_correct = [i for i in range(len(zip(*self.clusterResults[zooniverse_id]))) if not(i in correct)]
#
# to_return = []
# to_return2 = []
#
# self.__find_closest_neighbour__(zooniverse_id,to_skip=not_correct)
# for i in range(self.num_clusters):
# if i in not_correct:
# continue
#
# to_return.append((len(self.closest_neighbours[zooniverse_id][i][3])))
#
#
# self.__find_closest_neighbour__(zooniverse_id)
# for i in range(self.num_clusters):
# if i in not_correct:
# to_return2.append((len(self.closest_neighbours[zooniverse_id][i][3])))
# return to_return,to_return2
# userToGold = [[] for i in range(len(gold_markings))]
# goldToUser = [[] for i in range(len(zip(*self.clusterResults[zooniverse_id])))]
# #print len(goldToUser)
# #print len(userToGold)
# for marking_index,((x, y), pts, users) in enumerate(zip(*self.clusterResults[zooniverse_id])):
# dist = [math.sqrt((Gx-x)**2+(Gy-y)**2) for (Gx,Gy) in gold_markings]
# userToGold[dist.index(min(dist))].append(marking_index)
#
# for gold_index, (Gx,Gy) in enumerate(gold_markings):
# dist = [math.sqrt((Gx-x)**2+(Gy-y)**2) for (x,y),pts,users in zip(*self.clusterResults[zooniverse_id])]
# goldToUser[dist.index(min(dist))].append(gold_index)
#
# # which user penguins do not have a corresponding gold standard penguin - i.e., a false positive
# false_positive = [j for j,closest in enumerate(goldToUser) if closest == []]
# if false_positive == []:
# return 0
# print "^^^^^"
# for j in false_positive:
# for ii in range(len(gold_markings)):
# try:
# userToGold[ii].index(j)
# break
# except ValueError:
# pass
# if len(userToGold[ii]) == 2:
# print "***"
# print userToGold[ii]
# print self.closest_neighbours[zooniverse_id][userToGold[ii][0]][0],len(self.closest_neighbours[zooniverse_id][userToGold[ii][0]][3])
# print self.closest_neighbours[zooniverse_id][userToGold[ii][1]][0],len(self.closest_neighbours[zooniverse_id][userToGold[ii][1]][3])
# print "--/"
# return len(false_positive)
def __precision__(self,zooniverse_id,gold_markings):
# start by matching each of the user output images to the gold standard
userToGold = [[] for i in range(len(gold_markings))]
goldToUser = [[] for i in range(len(zip(*self.clusterResults[zooniverse_id])))]
for marking_index,((x, y), pts, users) in enumerate(zip(*self.clusterResults[zooniverse_id])):
dist = [math.sqrt((Gx-x)**2+(Gy-y)**2) for (Gx,Gy) in gold_markings]
userToGold[dist.index(min(dist))].append(marking_index)
for gold_index, (Gx,Gy) in enumerate(gold_markings):
dist = [math.sqrt((Gx-x)**2+(Gy-y)**2) for (x,y),pts,users in zip(*self.clusterResults[zooniverse_id])]
goldToUser[dist.index(min(dist))].append(gold_index)
match1 = len([g for g in goldToUser if len(g) == 1])
missed2 = len([g for g in userToGold if len(g) == 0])
for marking_index,((x, y), pts, users) in enumerate(zip(*self.clusterResults[zooniverse_id])):
for gold_index, (Gx,Gy) in enumerate(gold_markings):
if (goldToUser[marking_index] == [gold_index]) and (userToGold[gold_index] == [marking_index]) and (len(pts) >= 10):
X,Y = zip(*pts)
print normaltest(X)[1], normaltest(Y)[1]
#print missed,missed2
#print missed,missed2
def __display_nearest_neighbours__(self,zooniverse_id):
        if self.clusterResults[zooniverse_id] is None:
return
self.__display_image__(zooniverse_id)
centers,pts,users = self.clusterResults[zooniverse_id]
neighbours = multiClickCorrect.MultiClickCorrect().__find_closest__(centers,users)
for c1_index,c2_index,users in neighbours:
x1,y1 = centers[c1_index]
x2,y2 = centers[c2_index]
print (x1,y1),(x2,y2),len(users)
if len(users) == 1:
plt.plot([x1,x2],[y1,y2],"o-",color="red")
else:
plt.plot([x1,x2],[y1,y2],"o-",color="blue")
plt.show()
def __num_gold_clusters__(self,zooniverse_id):
return len(self.gold_data[zooniverse_id])
def __readin_subject__(self, zooniverse_id,read_in_gold=False):
subject = self.subject_collection.find_one({"zooniverse_id":zooniverse_id})
#print subject["location"]["standard"]
# records relating to the individual annotations
# first - the actual XY markings, then what species are associated with the annotations,
# then who made each marking
self.markings_list[zooniverse_id] = []
self.subjects_per_user[zooniverse_id] = []
self.what_list[zooniverse_id] = []
self.markings_to_user[zooniverse_id] = []
# keep track of which users annotated this. Users per subject will contain all users - ip_addresses
# will contain just those users who were not logged in so we only have the ip address to identify them
# we need to deal with non-logged in users slightly differently
self.ips_per_subject[zooniverse_id] = []
self.users_per_subject[zooniverse_id] = []
if read_in_gold and not(zooniverse_id in self.gold_data):
self.__load_gold_standard__(zooniverse_id)
#roi = self.__load_roi__(zooniverse_id)
if os.path.isfile(base_directory+"/Databases/"+self.project+"/"+zooniverse_id+".pickle"):
mongo_results = pickle.load(open(base_directory+"/Databases/"+self.project+"/"+zooniverse_id+".pickle","rb"))
else:
mongo_results = list(self.classification_collection.find({"subjects.zooniverse_id":zooniverse_id}))
pickle.dump(mongo_results,open(base_directory+"/Databases/"+self.project+"/"+zooniverse_id+".pickle","wb"))
for user_index, classification in enumerate(mongo_results):
# get the name of this user
if "user_name" in classification:
user = classification["user_name"]
else:
user = classification["user_ip"]
if not(user == self.expert):
self.ips_per_subject[zooniverse_id].append(user)
if user == self.expert:
continue
# check to see if we have already encountered this subject/user pairing
# due to some double classification errors
if user in self.users_per_subject[zooniverse_id]:
continue
self.users_per_subject[zooniverse_id].append(user)
if not(user in self.subjects_per_user):
self.subjects_per_user[user] = [zooniverse_id]
else:
                self.subjects_per_user[user].append(zooniverse_id)
# read in all of the markings this user made - which might be none
for pt, animal_type in self.tools.__list_markings__(classification):
if not(animal_type in self.to_skip):
self.markings_list[zooniverse_id].append(pt)
# print annotation_list
self.markings_to_user[zooniverse_id].append(user)
self.what_list[zooniverse_id].append(animal_type)
| apache-2.0 |
wesovilabs/wesovilabs_tf_samples | build/lib/wesovilabs_tensorflow_samples/claims.py | 1 | 5900 | import pandas as pd
import tensorflow as tf
import tempfile
from six.moves import urllib
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("model_dir", "", "Base directory for output models.")
flags.DEFINE_string("model_type", "deep", "Valid model types: {'wide', 'deep', 'wide_n_deep'}.")
flags.DEFINE_string("train_data", "", "Path to the training data.")
flags.DEFINE_string("test_data", "", "Path to the test data.")
flags.DEFINE_integer("train_steps", 4, "Number of training steps.")
COLUMNS = ["firstname","lastname","age", "city", "state", "country", "zip", "years_of_driving_license","claims_per_year"]
CATEGORICAL_COLUMNS = [ "country"]
CONTINUOUS_COLUMNS = [ "age", "years_of_driving_license" ]
LABEL_COLUMN = "label"
def _prepare_date():
return "/Users/Ivan/Sandbox/WesoviLabs/wesovilabs_tensorflow_samples/data/car_insurance.csv", "/Users/Ivan/Sandbox/WesoviLabs/wesovilabs_tensorflow_samples/data/car_insurance.csv"
def _download_resource(url):
file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
url,
file.name
)
filename = file.name
file.close()
return filename
def _download_data():
if FLAGS.train_data:
train_file_name = FLAGS.train_data
else:
train_file_name = _download_resource(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data")
print("Training data is downloaded to %s" % train_file_name)
if FLAGS.test_data:
test_file_name = FLAGS.test_data
else:
test_file_name = _download_resource(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test")
print("Testing data is downloaded to %s" % test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir):
"""Build an estimator."""
#Categorical base columns
country = tf.contrib.layers.sparse_column_with_hash_bucket("country", hash_bucket_size=1000)
#Continuous base columns
age = tf.contrib.layers.real_valued_column("age")
years_of_driving_license = tf.contrib.layers.real_valued_column("years_of_driving_license")
# Transformations.
age_buckets = tf.contrib.layers.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
years_of_driving_license_buckets = tf.contrib.layers.bucketized_column(years_of_driving_license, boundaries=[1, 2, 3, 5, 10, 15, 25])
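    # bucketized_column turns a continuous feature into one-hot bucket indices using the
    # given boundaries, so the linear part can learn one weight per range and the
    # buckets can be used inside crossed columns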
wide_columns = [
tf.contrib.layers.crossed_column([country, age_buckets], hash_bucket_size=int(1e4)),
"""
tf.contrib.layers.crossed_column([age, years_of_driving_license], hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age, salary], hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([gender, salary], hash_bucket_size=int(1e6)),
"""
]
deep_columns = [
age,
years_of_driving_license,
"""
tf.contrib.layers.embedding_column(country, dimension=8),
tf.contrib.layers.embedding_column(age_buckets, dimension=8),
tf.contrib.layers.embedding_column(years_of_driving_license_buckets, dimension=8),
"""
]
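    # model_type picks the estimator: "wide" fits a linear model on the sparse/crossed
    # columns, "deep" fits a DNN on the dense columns, and "wide_n_deep" combines both
    # in a single DNNLinearCombinedClassifier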
if FLAGS.model_type == "wide":
m = tf.contrib.learn.LinearClassifier(model_dir=model_dir,
feature_columns=wide_columns)
elif FLAGS.model_type == "deep":
m = tf.contrib.learn.DNNClassifier(model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(df):
"""Input builder function."""
# Creates a dictionary mapping from each continuous feature column name (k) to
# the values of that column stored in a constant Tensor.
continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
shape=[df[k].size, 1])
for k in CATEGORICAL_COLUMNS}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols)
feature_cols.update(categorical_cols)
# Converts the label column into a constant Tensor.
label = tf.constant(df[LABEL_COLUMN].values)
# Returns the feature columns and the label.
print('\n\n')
print(feature_cols)
print(label)
print('\n\n')
return feature_cols, label
def main(_):
train_file_name, test_file_name = _download_data()
df_train = pd.read_csv(
tf.gfile.Open(train_file_name),
names=COLUMNS,
sep=',',
engine="python")
df_test = pd.read_csv(
tf.gfile.Open(test_file_name),
names=COLUMNS,
sep=',',
engine="python")
df_train = df_train.dropna(how='any', axis=0)
df_test = df_test.dropna(how='any', axis=0)
df_train[LABEL_COLUMN] = (df_train["claims_per_year"].apply(lambda x: ">2" in x)).astype(int)
df_test[LABEL_COLUMN] = (df_test["claims_per_year"].apply(lambda x: ">2" in x)).astype(int)
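    # binary target: the label is 1 when the raw claims_per_year value contains the
    # string ">2" (more than two claims per year), and 0 otherwise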
model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir
print("model directory = %s" % model_dir)
m = build_estimator(model_dir)
m.fit(input_fn=lambda: input_fn(df_train), steps=FLAGS.train_steps)
results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
if __name__ == "__main__":
tf.app.run()
| mit |
binghongcha08/pyQMD | QTM/MixQC/mixQC.py | 1 | 2193 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 1 16:07:54 2016
@author: bing
"""
import numpy as np
import pylab as pl
import matplotlib as mpl
pl.style.use('ggplot')
font = {'family' : 'Times New Roman',
'weight' : 'normal',
'size' : '18'}
mpl.rc('font', **font) # pass in the font dict as kwargs
#import seaborn as sns
#sns.set_context('poster')
#sns.set_style("whitegrid")
data = np.genfromtxt(fname='/home/bing/qt/1.0.7/den.dat')
#dat = np.genfromtxt(fname='../2d12/en.dat')
#data = np.loadtxt('traj.dat')
#for x in range(1,10):
pl.figure(figsize=(14,9))
pl.subplot(111)
#pl.ylabel('Energy [hartree]')
pl.plot(data[:,0],data[:,1],'b-.',linewidth=2,label='L=8,$\partial_y U =0 $ ')
pl.plot(data[:,0],data[:,2],'b-.',linewidth=2)
dat = np.genfromtxt(fname='/home/bing/qt/spo_2d/1.0.3/den.dat')
pl.plot(dat[:,0],dat[:,1],'k',linewidth=3,label='QM w/o $U_c$')
pl.plot(dat[:,0],dat[:,2],'k',linewidth=3)
#pl.plot(data[:,0],data[:,4],'k-',linewidth=2,label='Energy')
#pl.legend(bbox_to_anchor=(0.5, 0.38, 0.42, .302), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#dat = np.genfromtxt(fname='dom2/den.dat')
#pl.plot(dat[:,0],dat[:,1],'c',linewidth=2,label='L = 2 ')
#pl.plot(dat[:,0],dat[:,2],'c',linewidth=2)
#
#dat = np.genfromtxt(fname='dom16/den.dat')
#pl.plot(dat[:,0],dat[:,1],'r--',linewidth=3,label='L = 16 ')
#pl.plot(dat[:,0],dat[:,2],'r--',linewidth=3)
#
#
dat1 = np.genfromtxt(fname='/home/bing/qt/1.0.6/den.dat')
pl.plot(dat1[:,0],dat1[:,1],'r--',linewidth=2,label='L = 8, $\partial_y \Lambda =0 $' )
pl.plot(dat1[:,0],dat1[:,2],'r--',linewidth=2)
dat2 = np.genfromtxt(fname='/home/bing/qt/1.0.5/dom8/den.dat')
pl.plot(dat2[:,0],dat2[:,1],'g',linewidth=2,label='L = 8')
pl.plot(dat2[:,0],dat2[:,2],'g',linewidth=2)
#pl.subplot(212)
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
pl.xlim(0,4)
#pl.xlabel('time [a.u.]')
##pl.ylabel('Energy [hartree]')
#pl.plot(data[:,0],data[:,1],'r--',linewidth=2,label='Kinetic')
#pl.plot(dat[:,0],dat[:,1],'k-',linewidth=2)
#pl.yscale('log')
pl.legend(loc=2)
pl.xlabel('Time [a.u.]')
pl.ylabel('Density overlap')
#pl.title()
pl.title('QC dynamics with 8 domains')
pl.savefig('den.pdf')
pl.show() | gpl-3.0 |
RomainBrault/operalib | operalib/kernels.py | 2 | 18637 | """
:mod:`operalib.kernels` implements some Operator-Valued Kernel
models.
"""
# Author: Romain Brault <[email protected]> with help from
# the scikit-learn community.
# License: MIT
from numpy import dot, diag, sqrt
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.kernel_approximation import RBFSampler, SkewedChi2Sampler
from scipy.sparse.linalg import LinearOperator
from scipy.linalg import svd
class DotProductKernel(object):
r"""
Dot product Operator-Valued Kernel of the form:
.. math::
x, y \mapsto K(x, y) = \mu \langle x, y \rangle 1_p + (1-\mu) \langle
x, y \rangle^2 I_p
Attributes
----------
mu : {array, LinearOperator}, shape = [n_targets, n_targets]
Tradeoff between shared and independant components
p : {Int}
dimension of the targets (n_targets).
References
----------
See also
--------
DotProductKernelMap
Dot Product Kernel Map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 10)
>>> K = ovk.DotProductKernel(mu=.2, p=5)
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<500x500 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, mu, p):
"""Initialize the Dot product Operator-Valued Kernel.
Parameters
----------
mu : {float}
Tradeoff between shared and independant components.
p : {integer}
dimension of the targets (n_targets).
"""
self.mu = mu
self.p = p
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
K_x : DotProductKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import DotProductKernelMap
return DotProductKernelMap(X, self.mu, self.p)
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
K_x : DotProductKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
class DecomposableKernel(object):
r"""
Decomposable Operator-Valued Kernel of the form:
.. math::
X, Y \mapsto K(X, Y) = k_s(X, Y) A
where A is a symmetric positive semidefinite operator acting on the
outputs.
Attributes
----------
A : {array, LinearOperator}, shape = [n_targets, n_targets]
Linear operator acting on the outputs
scalar_kernel : {callable}
Callable which associate to the training points X the Gram matrix.
scalar_kernel_params : {mapping of string to any}
Additional parameters (keyword arguments) for kernel function passed as
callable object.
References
----------
See also
--------
DecomposableKernelMap
Decomposable Kernel map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 10)
>>> K = ovk.DecomposableKernel(np.eye(2))
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<200x200 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, A, scalar_kernel=rbf_kernel, scalar_kernel_params=None):
"""Initialize the Decomposable Operator-Valued Kernel.
Parameters
----------
A : {array, LinearOperator}, shape = [n_targets, n_targets]
Linear operator acting on the outputs
scalar_kernel : {callable}
Callable which associate to the training points X the Gram matrix.
scalar_kernel_params : {mapping of string to any}, optional
Additional parameters (keyword arguments) for kernel function
passed as callable object.
"""
self.A = A
self.scalar_kernel = scalar_kernel
self.scalar_kernel_params = scalar_kernel_params
self.p = A.shape[0]
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
K_x : DecomposableKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import DecomposableKernelMap
return DecomposableKernelMap(X, self.A,
self.scalar_kernel,
self.scalar_kernel_params)
def get_orff_map(self, X, D=100, eps=1e-5, random_state=0):
r"""Return the Random Fourier Feature map associated with the data X.
.. math::
K_x: Y \mapsto \tilde{\Phi}(X)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
\tilde{\Phi}(X) : Linear Operator, callable
"""
u, s, v = svd(self.A, full_matrices=False, compute_uv=True)
self.B_ = dot(diag(sqrt(s[s > eps])), v[s > eps, :])
self.r = self.B_.shape[0]
if (self.scalar_kernel is rbf_kernel) and not hasattr(self, 'Xb_'):
if self.scalar_kernel_params is None:
gamma = 1.
else:
gamma = self.scalar_kernel_params['gamma']
self.phi_ = RBFSampler(gamma=gamma,
n_components=D, random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X).astype(X.dtype)
        elif (self.scalar_kernel == 'skewed_chi2') and not hasattr(self,
'Xb_'):
if self.scalar_kernel_params is None:
skew = 1.
else:
skew = self.scalar_kernel_params['skew']
self.phi_ = SkewedChi2Sampler(skewedness=skew,
n_components=D,
random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X).astype(X.dtype)
elif not hasattr(self, 'Xb_'):
raise NotImplementedError('ORFF map for kernel is not '
'implemented yet')
D = self.phi_.n_components
if X is self.Xb_:
cshape = (D, self.r)
rshape = (self.Xb_.shape[0], self.p)
oshape = (self.Xb_.shape[0] * self.p, D * self.r)
return LinearOperator(oshape,
dtype=self.Xb_.dtype,
matvec=lambda b: dot(dot(self.Xb_,
b.reshape(cshape)),
self.B_),
                                  rmatvec=lambda r: dot(self.Xb_.T,
dot(r.reshape(rshape),
self.B_.T)))
else:
Xb = self.phi_.transform(X)
cshape = (D, self.r)
rshape = (X.shape[0], self.p)
oshape = (Xb.shape[0] * self.p, D * self.r)
return LinearOperator(oshape,
dtype=self.Xb_.dtype,
matvec=lambda b: dot(dot(Xb,
b.reshape(cshape)),
self.B_),
rmatvec=lambda r: dot(Xb.T,
dot(r.reshape(rshape),
self.B_.T)))
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
K_x : DecomposableKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
class RBFCurlFreeKernel(object):
r"""
Curl-free Operator-Valued Kernel of the form:
.. math::
X \mapsto K_X(Y) = 2 \gamma exp(-\gamma||X - Y||^2)(I - 2\gamma(X - Y)
        (X - Y)^T).
Attributes
----------
gamma : {float}
RBF kernel parameter.
References
----------
See also
--------
RBFCurlFreeKernelMap
Curl-free Kernel map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 2)
>>> K = ovk.RBFCurlFreeKernel(1.)
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<200x200 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, gamma):
"""Initialize the Decomposable Operator-Valued Kernel.
Parameters
----------
gamma : {float}, shape = [n_targets, n_targets]
RBF kernel parameter.
"""
self.gamma = gamma
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
K_x : DecomposableKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import RBFCurlFreeKernelMap
return RBFCurlFreeKernelMap(X, self.gamma)
def get_orff_map(self, X, D=100, random_state=0):
r"""Return the Random Fourier Feature map associated with the data X.
.. math::
K_x: Y \mapsto \tilde{\Phi}(X)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
\tilde{\Phi}(X) : Linear Operator, callable
"""
self.r = 1
if not hasattr(self, 'Xb_'):
self.phi_ = RBFSampler(gamma=self.gamma,
n_components=D, random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X)
self.Xb_ = (self.Xb_.reshape((self.Xb_.shape[0],
1, self.Xb_.shape[1])) *
self.phi_.random_weights_.reshape((1, -1,
self.Xb_.shape[1])))
self.Xb_ = self.Xb_.reshape((-1, self.Xb_.shape[2]))
D = self.phi_.n_components
if X is self.Xb_:
return LinearOperator(self.Xb_.shape,
                                  matvec=lambda b: dot(self.Xb_, b),
                                  rmatvec=lambda r: dot(self.Xb_.T, r))
else:
Xb = self.phi_.transform(X)
Xb = (Xb.reshape((Xb.shape[0], 1, Xb.shape[1])) *
self.phi_.random_weights_.reshape((1, -1, Xb.shape[1])))
Xb = Xb.reshape((-1, Xb.shape[2]))
return LinearOperator(Xb.shape,
matvec=lambda b: dot(Xb, b),
rmatvec=lambda r: dot(Xb.T, r))
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
K_x : DecomposableKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
class RBFDivFreeKernel(object):
r"""
Divergence-free Operator-Valued Kernel of the form:
.. math::
X \mapsto K_X(Y) = exp(-\gamma||X-Y||^2)A_{X,Y},
where,
.. math::
        A_{X,Y} = 2\gamma(X-Y)(X-Y)^T+((d-1)-2\gamma||X-Y||^2) I.
Attributes
----------
gamma : {float}
RBF kernel parameter.
References
----------
See also
--------
RBFDivFreeKernelMap
Divergence-free Kernel map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 2)
>>> K = ovk.RBFDivFreeKernel(1.)
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<200x200 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, gamma):
"""Initialize the Decomposable Operator-Valued Kernel.
Parameters
----------
gamma : {float}, shape = [n_targets, n_targets]
RBF kernel parameter.
"""
self.gamma = gamma
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
K_x : DecomposableKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import RBFDivFreeKernelMap
return RBFDivFreeKernelMap(X, self.gamma)
def get_orff_map(self, X, D=100, random_state=0):
r"""Return the Random Fourier Feature map associated with the data X.
.. math::
K_x: Y \mapsto \tilde{\Phi}(X)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
\tilde{\Phi}(X) : Linear Operator, callable
"""
self.r = 1
if not hasattr(self, 'Xb_'):
self.phi_ = RBFSampler(gamma=self.gamma,
n_components=D, random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X)
self.Xb_ = (self.Xb_.reshape((self.Xb_.shape[0],
1, self.Xb_.shape[1])) *
self.phi_.random_weights_.reshape((1, -1,
self.Xb_.shape[1])))
self.Xb_ = self.Xb_.reshape((-1, self.Xb_.shape[2]))
D = self.phi_.n_components
if X is self.Xb_:
return LinearOperator(self.Xb_.shape,
                                  matvec=lambda b: dot(self.Xb_, b),
                                  rmatvec=lambda r: dot(self.Xb_.T, r))
else:
Xb = self.phi_.transform(X)
# TODO:
# w = self.phi_.random_weights_.reshape((1, -1, Xb.shape[1]))
# wn = np.linalg.norm(w)
# Xb = (Xb.reshape((Xb.shape[0], 1, Xb.shape[1])) *
# wn * np.eye()w np.dot(w.T, w) / wn)
Xb = Xb.reshape((-1, Xb.shape[2]))
return LinearOperator(Xb.shape,
matvec=lambda b: dot(Xb, b),
rmatvec=lambda r: dot(Xb.T, r))
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
K_x : DecomposableKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
| bsd-3-clause |
ahmadia/bokeh | bokeh/charts/builder/line_builder.py | 43 | 5360 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Line class which lets you build your Line charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
import numpy as np
from ..utils import cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line as LineGlyph
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Line(values, index=None, **kws):
""" Create a line chart using :class:`LineBuilder <bokeh.charts.builder.line_builder.LineBuilder>` to
render the geometry from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series as an **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
area.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import Line, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
output_file('line.html')
show(line)
"""
return create_and_build(LineBuilder, values, index=index, **kws)
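# Hedged usage sketch (added for illustration): the docstring example above feeds Line a
# bare NumPy array; mappings work as well, optionally naming one key as the shared index.
# The series names and output file below are illustrative, not from the original module.
def _line_mapping_sketch():
    from collections import OrderedDict
    from bokeh.charts import Line, output_file, show
    xyvalues = OrderedDict(python=[2, 3, 7, 5, 26],
                           pypy=[12, 33, 47, 15, 126],
                           jython=[22, 43, 10, 25, 26])
    chart = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
    output_file('line_mapping.html')
    show(chart)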
class LineBuilder(Builder):
"""This is the Line class and it is in charge of plotting
Line charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Calculate the chart properties accordingly from line.values.
Then build a dict containing references to all the points to be
used by the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
xs = self._values_index
self.set_and_get("x", "", np.array(xs))
for col, values in self._values.items():
if isinstance(self.index, string_types) and col == self.index:
continue
# save every new group we find
self._groups.append(col)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""
Push the Line data into the ColumnDataSource and calculate the
proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1:]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the Line.
Takes reference points from the data loaded at the ColumnDataSource.
"""
colors = cycle_colors(self._attr, self.palette)
for i, duplet in enumerate(self._attr[1:], start=1):
glyph = LineGlyph(x='x', y=duplet, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
| bsd-3-clause |
Srisai85/scikit-learn | doc/sphinxext/gen_rst.py | 142 | 40026 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
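# Hedged sketch (added for illustration): a tiny, hand-written index exercising the
# parser above.  Real searchindex.js files are far larger but use the same
# "objects:{...}" / "filenames:[...]" layout assumed here.
def _parse_searchindex_sketch():
    fake_index = ('Search.setIndex({objects:{"sklearn.svm":{"SVC":[0,1]}},'
                  'filenames:["modules/svm","modules/tree"]})')
    filenames, objects = parse_sphinx_searchindex(fake_index)
    # filenames -> ['modules/svm', 'modules/tree']
    # objects   -> {'sklearn.svm': {'SVC': [0, 1]}}
    return filenames, objects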
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
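# Hedged usage sketch (added for illustration): resolving one "code object" against a
# local Sphinx build.  It assumes such a build already exists at `doc_build_dir`; the
# path and the example object are illustrative.
def _doc_link_resolver_sketch(doc_build_dir='_build/html/stable'):
    resolver = SphinxDocLinkResolver(doc_build_dir, relative=True)
    cobj = {'name': 'PCA', 'module': 'sklearn.decomposition',
            'module_short': 'sklearn.decomposition'}
    return resolver.resolve(cobj, this_url=os.path.join(doc_build_dir, 'auto_examples'))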
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
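# Hedged sketch (added for illustration): extract_docstring() works on a file path, so
# the quickest way to see its output is to write a tiny example file first.
def _extract_docstring_sketch():
    import tempfile
    src = '"""My example\n\nLonger description paragraph."""\nprint(1)\n'
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
        f.write(src)
        path = f.name
    docstring, first_par, end_row = extract_docstring(path)
    # first_par -> 'My example'; end_row points just past the docstring
    return docstring, first_par, end_row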
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
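# Hedged sketch (added for illustration): the RST emitted for one hypothetical gallery
# entry, e.g. a file named plot_example.py living in the 'cluster' sub-gallery.
def _thumbnail_div_sketch():
    snippet = "A one-line description shown as the tooltip"
    rst = _thumbnail_div('cluster', 'cluster', 'plot_example.py', snippet)
    # rst contains a ".. figure:: cluster/images/thumb/plot_example.png" directive
    # and a ":ref:`example_cluster_plot_example.py`" cross-reference.
    return rst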
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
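# Hedged sketch (added for illustration): the helper strips trailing components from the
# dotted module path for as long as the object stays importable.  numpy is already a
# dependency of this script, so it makes a safe example.
def _short_module_name_sketch():
    short = get_short_module_name('numpy.linalg.linalg', 'norm')
    # 'from numpy.linalg import norm' succeeds while 'from numpy import norm' does not,
    # so the shortest usable prefix is 'numpy.linalg'.
    return short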
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
Tobychev/tardis | tardis/plasma/properties/level_population.py | 3 | 3153 | import logging
import pandas as pd
import numpy as np
from tardis.plasma.properties.base import ProcessingPlasmaProperty
logger = logging.getLogger(__name__)
__all__ = ['LevelNumberDensity']
class LevelNumberDensity(ProcessingPlasmaProperty):
"""
Attributes:
level_number_density : Pandas DataFrame, dtype float
Index atom number, ion number, level number. Columns are zones.
"""
outputs = ('level_number_density',)
latex_name = ('N_{i,j,k}',)
latex_formula = ('N_{i,j}\\dfrac{bf_{i,j,k}}{Z_{i,j}}',)
def calculate(self):
pass
def __init__(self, plasma_parent, helium_treatment='dilute-lte'):
"""
Calculates the level populations with the Boltzmann equation in LTE.
"""
super(LevelNumberDensity, self).__init__(plasma_parent)
if hasattr(self.plasma_parent, 'plasma_properties_dict'):
if 'HeliumNLTE' in \
self.plasma_parent.plasma_properties_dict.keys():
helium_treatment='recomb-nlte'
if helium_treatment=='recomb-nlte':
self.calculate = self._calculate_helium_nlte
elif helium_treatment=='dilute-lte':
self.calculate = self._calculate_dilute_lte
self._update_inputs()
self.initialize_indices = True
def _initialize_indices(self, levels, partition_function):
indexer = pd.Series(np.arange(partition_function.shape[0]),
index=partition_function.index)
self._ion2level_idx = indexer.ix[levels.droplevel(2)].values
def _calculate_dilute_lte(self, level_boltzmann_factor, ion_number_density,
levels, partition_function):
"""
Reduces non-metastable level populations by a factor of W compared to LTE in the case of dilute-lte excitation.
"""
if self.initialize_indices:
self._initialize_indices(levels, partition_function)
self.initialize_indices = False
partition_function_broadcast = partition_function.values[
self._ion2level_idx]
level_population_fraction = (level_boltzmann_factor.values
/ partition_function_broadcast)
ion_number_density_broadcast = ion_number_density.values[
self._ion2level_idx]
level_number_density = (level_population_fraction *
ion_number_density_broadcast)
return pd.DataFrame(level_number_density,
index=level_boltzmann_factor.index)
def _calculate_helium_nlte(self, level_boltzmann_factor,
ion_number_density, levels, partition_function, helium_population):
"""
If one of the two helium NLTE methods is used, this updates the helium level populations to the appropriate
values.
"""
level_number_density = self._calculate_dilute_lte(
level_boltzmann_factor, ion_number_density, levels,
partition_function)
if helium_population is not None:
level_number_density.ix[2].update(helium_population)
return level_number_density | bsd-3-clause |
plxaye/chromium | src/chrome/browser/nacl_host/test/gdb_rsp.py | 99 | 2431 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is based on gdb_rsp.py file from NaCl repository.
import re
import socket
import time
def RspChecksum(data):
checksum = 0
for char in data:
checksum = (checksum + ord(char)) % 0x100
return checksum
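# Hedged sketch (added for illustration): RSP packets are framed as '$<payload>#<checksum>',
# with the checksum being the modulo-256 sum of the payload bytes as two hex digits.
def RspFrameSketch(data='g'):
  frame = '$%s#%02x' % (data, RspChecksum(data))
  # For the 'read registers' packet 'g' this is '$g#67', since ord('g') == 0x67.
  return frame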
class GdbRspConnection(object):
def __init__(self, addr):
self._socket = self._Connect(addr)
def _Connect(self, addr):
# We have to poll because we do not know when sel_ldr has
# successfully done bind() on the TCP port. This is inherently
# unreliable.
# TODO(mseaborn): Add a more reliable connection mechanism to
# sel_ldr's debug stub.
timeout_in_seconds = 10
poll_time_in_seconds = 0.1
for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
# On Mac OS X, we have to create a new socket FD for each retry.
sock = socket.socket()
try:
sock.connect(addr)
except socket.error:
# Retry after a delay.
time.sleep(poll_time_in_seconds)
else:
return sock
raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
% timeout_in_seconds)
def _GetReply(self):
reply = ''
while True:
data = self._socket.recv(1024)
if len(data) == 0:
raise AssertionError('EOF on socket reached with '
'incomplete reply message: %r' % reply)
reply += data
if '#' in data:
break
match = re.match('\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
if match is None:
raise AssertionError('Unexpected reply message: %r' % reply)
reply_body = match.group(1)
checksum = match.group(2)
expected_checksum = '%02x' % RspChecksum(reply_body)
if checksum != expected_checksum:
raise AssertionError('Bad RSP checksum: %r != %r' %
(checksum, expected_checksum))
# Send acknowledgement.
self._socket.send('+')
return reply_body
# Send an rsp message, but don't wait for or expect a reply.
def RspSendOnly(self, data):
msg = '$%s#%02x' % (data, RspChecksum(data))
return self._socket.send(msg)
def RspRequest(self, data):
self.RspSendOnly(data)
return self._GetReply()
def RspInterrupt(self):
self._socket.send('\x03')
return self._GetReply()
| apache-2.0 |
abhisg/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
Caranarq/01_Dmine | 10_ResiduosSolidosUrbanos/P1017/P1017.py | 1 | 3804 | # -*- coding: utf-8 -*-
"""
Started on thu, dec 12nd, 2017
@author: carlos.arana
"""
# Librerias utilizadas
import pandas as pd
import sys
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from VarInt.VarInt import VarInt
from classes.Meta import Meta
from Compilador.Compilador import compilar
"""
The local libraries used above are available at the following locations:
SCRIPT: | AVAILABLE AT:
------ | ------------------------------------------------------------------------------------
VarInt | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/VarInt
Meta | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Classes
Compilador | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Compilador
"""
# Parameter documentation --------------------------------------------------------------------------------------------
# Parameter descriptions
M = Meta
M.ClaveParametro = 'P1017'
M.NombreParametro = 'Viviendas que no entregan sus residuos al Servicio Publico de Recolección'
M.DescParam = 'Porcentaje de viviendas que no entregan sus residuos al Servcio Publico de Recoleccion, disponiendo de ' \
'estos de manera inadecuada'
M.UnidadesParam = 'Porcentaje'
M.TituloParametro = 'RSU_NO_SPR' # Para nombrar la columna del parametro
M.PeriodoParam = '2015'
M.TipoInt = 2
# Handlings
M.ParDtype = 'float'
M.TipoVar = 'C' # (Tipos de Variable: [C]ontinua, [D]iscreta [O]rdinal, [B]inaria o [N]ominal)
M.array = []
M.TipoAgr = 'mean'
# Descriptions of the data mining process
M.nomarchivodataset = '19'
M.extarchivodataset = 'xlsx'
M.ContenidoHojaDatos = 'Datos disponibles por municipio para 2015, utilizados para la construcción del parametro'
M.ClaveDataset = 'EI2015'
M.ActDatos = '2015'
M.Agregacion = 'Este parámetro utiliza las variables "Queman_residuos" y "Entierran_residuos_o_tiran_en_otro_lugar". ' \
'Para agregar la información y construir el parámetro, se suman ambas variables y se promedian los ' \
'valores para los municipios que componen una Ciudad del SUN. En la agregación de datos ' \
'municipales a ciudades del SUN se han excluido los Municipos en los que la muestra de la Encuesta ' \
'Intercensal fue clasificada como insuficiente.'
M.getmetafromds = 1
# Descriptions generated from the parameter key
Meta.fillmeta(M)
M.Notas = 'Este parámetro considera las viviendas que no entregan sus residuos a un servicio público de ' \
'recolección, y que disponen de estos quemándolos o tirándolos en lugares inadecuados (Como puede ser en ' \
'la calle, baldío o río). El valor indica el porcentaje de viviendas en cada ciudad que cumplen esta ' \
'condicion (por ejemplo, 0.4619 = 0.4916 % y 50.95 = 50.95 %)'
# Parameter construction ----------------------------------------------------------------------------------------------
# Load the initial dataset
dataset = pd.read_excel(M.DirFuente + '\\' + M.ArchivoDataset,
sheetname=M.nomarchivodataset, dtype={'CVE_MUN': str})
dataset.set_index('CVE_MUN', inplace=True)
# Generate the dataset for the parameter and the integrity variable
dataset = dataset[~dataset['Municipio'].str.contains('\*\*')] # Exclude municipalities with ** (insufficient sample)
columnas = list(dataset)[4:6]
dataset = dataset[columnas]
par_dataset = dataset.sum(axis=1) # Parameter construction
par_dataset = par_dataset.to_frame(name = M.ClaveParametro)
par_dataset, variables_dataset = VarInt(par_dataset, dataset, tipo=M.TipoInt)
# Compilation
compilar(M, dataset, par_dataset, variables_dataset)
| gpl-3.0 |
bundgus/python-playground | matplotlib-playground/examples/statistics/multiple_histograms_side_by_side.py | 1 | 1314 |
"""
Demo of how to produce multiple histograms side by side
"""
import numpy as np
import matplotlib.pyplot as plt
number_of_bins = 20
# An example of three data sets to compare
number_of_data_points = 1000
labels = ["A", "B", "C"]
data_sets = [np.random.normal(0, 1, number_of_data_points),
np.random.normal(6, 1, number_of_data_points),
np.random.normal(-3, 1, number_of_data_points)]
# Computed quantities to aid plotting
hist_range = (np.min(data_sets), np.max(data_sets))
binned_data_sets = [np.histogram(d, range=hist_range, bins=number_of_bins)[0]
for d in data_sets]
binned_maximums = np.max(binned_data_sets, axis=1)
x_locations = np.arange(0, sum(binned_maximums), np.max(binned_maximums))
# The bin_edges are the same for all of the histograms
bin_edges = np.linspace(hist_range[0], hist_range[1], number_of_bins + 1)
centers = .5 * (bin_edges + np.roll(bin_edges, 1))[1:]  # midpoints of each bin
heights = np.diff(bin_edges)
# Cycle through and plot each histogram
ax = plt.subplot(111)
for x_loc, binned_data in zip(x_locations, binned_data_sets):
lefts = x_loc - .5 * binned_data
ax.barh(centers, binned_data, height=heights, left=lefts)
ax.set_xticks(x_locations)
ax.set_xticklabels(labels)
ax.set_ylabel("Data values")
ax.set_xlabel("Data sets")
plt.show()
| mit |
zonca/healpy | healpy/newvisufunc.py | 1 | 7064 | __all__ = ['mollview', 'projplot']
import numpy as np
from pixelfunc import ang2pix, npix2nside
from rotator import Rotator
import exceptions
from matplotlib.projections.geo import GeoAxes
###### WARNING #################
# this module is work in progress, the aim is to reimplement the healpy
# plot functions using the new features of matplotlib and remove most
# of the custom projection code
class ThetaFormatterShiftPi(GeoAxes.ThetaFormatter):
"""Shifts labelling by pi
Shifts labelling from -180,180 to 0-360"""
def __call__(self, x, pos=None):
if x != 0:
x *= -1
if x < 0:
x += 2*np.pi
return super(ThetaFormatterShiftPi, self).__call__(x, pos)
def lonlat(theta, phi):
"""Converts theta and phi to longitude and latitude
From colatitude to latitude and from astro longitude to geo longitude"""
longitude = -1*np.asarray(phi)
latitude = np.pi/2 - np.asarray(theta)
return longitude, latitude
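# Hedged sketch (added for illustration): two worked values for the conversion above --
# colatitude pi/2 lies on the equator (latitude 0), and the astro sign flip sends
# phi = +pi/3 to longitude -pi/3.
def _lonlat_sketch():
    lon, lat = lonlat(np.pi / 2, np.pi / 3)
    # lon -> -np.pi / 3, lat -> 0.0
    return lon, lat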
def mollview(m=None, rot=None, coord=None, unit='',
xsize=1000, nest=False,
min=None, max=None, flip='astro',
format='%g',
cbar=True, cmap=None,
norm=None,
graticule=False, graticule_labels=False,
**kwargs):
"""Plot an healpix map (given as an array) in Mollweide projection.
Parameters
----------
map : float, array-like or None
An array containing the map, supports masked maps, see the `ma` function.
If None, will display a blank map, useful for overplotting.
rot : scalar or sequence, optional
Describe the rotation to apply.
In the form (lon, lat, psi) (unit: degrees) : the point at
longitude *lon* and latitude *lat* will be at the center. An additional rotation
of angle *psi* around this direction is applied.
coord : sequence of character, optional
Either one of 'G', 'E' or 'C' to describe the coordinate
system of the map, or a sequence of 2 of these to rotate
the map from the first to the second coordinate system.
unit : str, optional
A text describing the unit of the data. Default: ''
xsize : int, optional
        The size of the image. Default: 1000
nest : bool, optional
If True, ordering scheme is NESTED. Default: False (RING)
min : float, optional
The minimum range value
max : float, optional
The maximum range value
flip : {'astro', 'geo'}, optional
Defines the convention of projection : 'astro' (default, east towards left, west towards right)
        or 'geo' (east towards right, west towards left)
format : str, optional
The format of the scale label. Default: '%g'
cbar : bool, optional
Display the colorbar. Default: True
norm : {'hist', 'log', None}
Color normalization, hist= histogram equalized color mapping,
log= logarithmic color mapping, default: None (linear color mapping)
kwargs : keywords
any additional keyword is passed to pcolormesh
graticule : bool
add graticule
graticule_labels : bool
longitude and latitude labels
"""
# not implemented features
if not (norm is None):
raise exceptions.NotImplementedError()
# Create the figure
import matplotlib.pyplot as plt
width = 8.5
fig = plt.figure(figsize=(width,width*.63))
ax = fig.add_subplot(111, projection="mollweide")
# FIXME: make a more general axes creation that works also with subplots
#ax = plt.gcf().add_axes((.125, .1, .9, .9), projection="mollweide")
# remove white space around the image
plt.subplots_adjust(left=0.02, right=0.98, top=0.95, bottom=0.05)
if graticule and graticule_labels:
plt.subplots_adjust(left=0.04, right=0.98, top=0.95, bottom=0.05)
if not m is None:
# auto min and max
if min is None:
min = m.min()
if max is None:
max = m.max()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ysize = xsize/2
theta = np.linspace(np.pi, 0, ysize)
phi = np.linspace(-np.pi, np.pi, xsize)
longitude = np.radians(np.linspace(-180, 180, xsize))
if flip == "astro":
longitude = longitude[::-1]
latitude = np.radians(np.linspace(-90, 90, ysize))
# project the map to a rectangular matrix xsize x ysize
PHI, THETA = np.meshgrid(phi, theta)
# coord or rotation
if coord or rot:
r = Rotator(coord=coord, rot=rot, inv=True)
THETA, PHI = r(THETA.flatten(), PHI.flatten())
THETA = THETA.reshape(ysize, xsize)
PHI = PHI.reshape(ysize, xsize)
nside = npix2nside(len(m))
if not m is None:
grid_pix = ang2pix(nside, THETA, PHI, nest=nest)
grid_map = m[grid_pix]
# plot
ret = plt.pcolormesh(longitude, latitude, grid_map, vmin=min, vmax=max, rasterized=True, **kwargs)
# graticule
plt.grid(graticule)
if graticule:
longitude_grid_spacing = 60 # deg
ax.set_longitude_grid(longitude_grid_spacing)
if width < 10:
ax.set_latitude_grid(45)
ax.set_longitude_grid_ends(90)
if graticule_labels:
ax.xaxis.set_major_formatter(ThetaFormatterShiftPi(longitude_grid_spacing))
else:
# remove longitude and latitude labels
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
# colorbar
if cbar and not m is None:
cb = fig.colorbar(ret, orientation='horizontal', shrink=.4, pad=0.05, ticks=[min, max])
cb.ax.xaxis.set_label_text(unit)
cb.ax.xaxis.labelpad = -8
# workaround for issue with viewers, see colorbar docstring
cb.solids.set_edgecolor("face")
plt.draw()
finally:
ax.hold(washold)
return ret
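# Hedged usage sketch (added for illustration): mollview() only needs a 1-D HEALPix map
# array; the nside and unit below are arbitrary examples.
def _mollview_usage_sketch():
    nside = 32
    m = np.arange(12 * nside ** 2, dtype=np.float64)  # any RING-ordered map of npix values
    return mollview(m, unit='arbitrary', graticule=True, graticule_labels=True)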
def projplot(theta, phi, fmt=None, **kwargs):
"""projplot is a wrapper around :func:`matplotlib.Axes.plot` to take into account the
spherical projection.
You can call this function as::
projplot(theta, phi) # plot a line going through points at coord (theta, phi)
projplot(theta, phi, 'bo') # plot 'o' in blue at coord (theta, phi)
Parameters
----------
theta, phi : float, array-like
Coordinates of point to plot in radians.
fmt : str
A format string (see :func:`matplotlib.Axes.plot` for details)
Notes
-----
Other keywords are passed to :func:`matplotlib.Axes.plot`.
See Also
--------
projscatter, projtext
"""
import matplotlib.pyplot as plt
longitude, latitude = lonlat(theta, phi)
if fmt is None:
ret = plt.plot(longitude, latitude, **kwargs)
else:
ret = plt.plot(longitude, latitude, fmt, **kwargs)
return ret
| gpl-2.0 |
indhub/mxnet | example/autoencoder/data.py | 18 | 1348 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from __future__ import print_function
import os
import numpy as np
from sklearn.datasets import fetch_mldata
def get_mnist():
np.random.seed(1234) # set seed for deterministic ordering
data_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
data_path = os.path.join(data_path, '../../data')
mnist = fetch_mldata('MNIST original', data_home=data_path)
p = np.random.permutation(mnist.data.shape[0])
X = mnist.data[p].astype(np.float32)*0.02
Y = mnist.target[p]
return X, Y
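# Hedged usage sketch (added for illustration): the loader returns the full, shuffled
# MNIST set as dense arrays -- 70000 rows of 784 scaled pixel values plus labels.
def _get_mnist_sketch():
    X, Y = get_mnist()
    print(X.shape, Y.shape) # expected: (70000, 784) (70000,)
    return X[:60000], Y[:60000], X[60000:], Y[60000:] # a common train/test split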
| apache-2.0 |
RomainBrault/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
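# Hedged sketch (added for illustration): benchmark() only needs an estimator exposing
# fit/transform/inverse_transform, so it can be exercised on small random data as well.
def _benchmark_sketch():
    rng = np.random.RandomState(0)
    data = rng.randn(200, 20)
    results = benchmark(PCA(n_components=5), data)
    # results is a dict of the form {'time': ..., 'error': ...}
    return results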
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
ch3ll0v3k/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases, while the performance
on the test set is optimal within a range of values of the regularization
parameter. The example uses an Elastic-Net regression model, and the performance
is measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
q1ang/tushare | setup.py | 9 | 2613 | from setuptools import setup, find_packages
import codecs
import os
import tushare
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
long_desc = """
TuShare
===============
.. image:: https://api.travis-ci.org/waditu/tushare.png?branch=master
:target: https://travis-ci.org/waditu/tushare
.. image:: https://badge.fury.io/py/tushare.png
:target: http://badge.fury.io/py/tushare
* easy to use as most of the data returned are pandas DataFrame objects
* can be easily saved as csv, excel or json files
* can be inserted into MySQL or Mongodb
Target Users
--------------
* financial market analyst of China
* learners of financial data analysis with pandas/NumPy
* people who are interested in China financial data
Installation
--------------
pip install tushare
Upgrade
---------------
pip install tushare --upgrade
Quick Start
--------------
::
import tushare as ts
ts.get_hist_data('600848')
return::
open high close low volume p_change ma5 \
date
2012-01-11 6.880 7.380 7.060 6.880 14129.96 2.62 7.060
2012-01-12 7.050 7.100 6.980 6.900 7895.19 -1.13 7.020
2012-01-13 6.950 7.000 6.700 6.690 6611.87 -4.01 6.913
2012-01-16 6.680 6.750 6.510 6.480 2941.63 -2.84 6.813
2012-01-17 6.660 6.880 6.860 6.460 8642.57 5.38 6.822
2012-01-18 7.000 7.300 6.890 6.880 13075.40 0.44 6.788
2012-01-19 6.690 6.950 6.890 6.680 6117.32 0.00 6.770
2012-01-20 6.870 7.080 7.010 6.870 6813.09 1.74 6.832
"""
setup(
name='tushare',
version=tushare.__version__,
description='A utility for crawling historical and Real-time Quotes data of China stocks',
    # long_description=read("README.rst"),
long_description = long_desc,
author='Jimmy Liu',
author_email='[email protected]',
license='BSD',
url='http://tushare.org',
keywords='China stock data',
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: BSD License'],
packages=['tushare','tushare.stock', 'tushare.data', 'tushare.util', 'tushare.datayes'],
package_data={'': ['*.csv']},
) | bsd-3-clause |
ky822/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
shenzebang/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mark the unlabeled points with -1 for the semi-supervised model
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
meclav/whistle | src/main.py | 1 | 4753 | import sys
import time
import numpy as np
import random
import matplotlib.pyplot as plt
import queue
import matplotlib.animation as animation
import threading
from scipy.io.wavfile import read as wavread
from scipy.signal import blackmanharris
from pysoundcard import *
from math import log
from sys import float_info
from collections import deque
"""
This function takes a numpy vector that represents the sampled sound from the stream, and processes it.
"""
def get_frequency(in_data, chunk):
# Take the fft and square each value
windowed = in_data[:,0] * blackmanharris(len(in_data))
data_after_fft = np.fft.rfft(windowed)
# Find the peak
i = np.argmax(abs(data_after_fft))
# Convert to equivalent frequency
# TODO: calibrate the frequency so it shows up in Hz ,this is not the right calculation
thefreq= chunk * i / len(windowed)
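    # Note (assumption, not from the original author): with a sample rate fs, the
    # standard bin-to-Hz conversion for an rfft peak at bin i would be
    #   thefreq = i * fs / len(windowed)
    # e.g. fs = 44100 as configured for the Stream below; the expression above is a
    # placeholder pending the calibration mentioned in the TODO.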
    # use the magnitude of the (complex) FFT values when converting to decibels
    data_in_decibels = map(lambda x: -30 if abs(x) < sys.float_info.min else 20 * log(abs(x)), data_after_fft)
# TODO: a more accurate method would be to use quadratic interpolation around this value to get a better estimate of where the maximum is
# TODO: the code iterates through the chunk again needlessly to find the peak intensity. Improve the algorithm.
peak_intensity = max(data_in_decibels)
return thefreq, peak_intensity
"""
The API for the sound input operates on callbacks. A function like this needs to be provided to the constructor:
def simple_callback(in_data, frame_count, time_info, status):
    print(get_frequency(in_data, 2048))
return (in_data, continue_flag)
which is then called asynchronously after the chunk of input is received.
"""
def make_callback_that_puts_into_queue(queue):
def callback(in_data, frame_count, time_info, status):
frequency, intensity = get_frequency(in_data, block_length)
queue.put((frequency, intensity))
return (in_data, continue_flag)
return callback
queue_for_the_stream = queue.Queue()
# FFT works best when the block length is a power of two.
block_length = 2048
s = Stream(sample_rate=44100, block_length=block_length,output_device=False, callback=make_callback_that_puts_into_queue(queue_for_the_stream))
s.start()
"""
The input part of the code finishes here. The input gets taken from the stream, transformed and placed into a queue.
We can retrieve the data with the queue.get() operation. The operation works very nicely, because if the queue is empty, it blocks until it can receive an input.
"""
"""
A simple implementation of a display.
We store the incoming data into a buffer. One thread fills the buffer constantly, the other redraws the buffer as fast as it can.
"""
buffer_size = 20
buffer = deque([0]*buffer_size, maxlen=buffer_size)
# let the thread add elements to the queue in a loop.
#TODO: think of a better implementation that doesn't involve a separate thread and time.sleep().
def keepFillingTheBuffer(queue,buffer):
while True:
time.sleep(0.03) # 0.03 is about half the time between successive chunks appearing.
        frequency, intensity = queue.get()
        buffer.append(frequency)
t= threading.Thread(target=keepFillingTheBuffer, args = (queue_for_the_stream, buffer))
t.daemon=True
t.start()
"""
This makes an animation using matplotlib. Shamelessly copypasted and slightly adapted.
"""
fig = plt.figure()
ax = plt.axes(xlim=(0, 20), ylim=(10, 160))
line, = ax.plot([], [], lw=2)
def init():
line.set_data([], [])
return line,
def make_animate( buffer, queue):
def animate(i):
x = np.linspace(0, buffer_size, buffer_size)
y = list(buffer)
line.set_data(x, y)
return line,
return animate
animate = make_animate(buffer, queue_for_the_stream)
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init, blit=True)
plt.show()
#TODO: implement stopping after a keystroke as opposed to ctrl+c.
time.sleep(1000)
s.stop()
"""
I experimented with the callback function that takes an object responsible for plotting.
The problem was that the plotting might be slower than incoming data, so you can't redraw every time you receive a chunk.
class DrawingBuffer:
def __init__(self, bufferSize):
#self.buffer = deque([-30]*bufferSize, maxlen=bufferSize)
self.bufferSize = bufferSize
self.current = 0
plt.axis([0, bufferSize, 0, 200])
plt.ion()
plt.show()
def newFrame(self, frequency, intensity): # for now do nothing with intensity
self.current = (self.current + 1 ) % self.bufferSize
plt.scatter(self.current, frequency)
plt.draw()
def make_callback_that_draws(drawing_object):
def callback(in_data, frame_count, time_info, status):
drawing_object.newFrame(*get_frequency(in_data, frame_count, block_length))
return (in_data, continue_flag)
return callback
"""
| mit |
BiRG/Omics-Dashboard | modules/sbin/perform_opls.py | 1 | 7464 | #!/usr/bin/env python3
import argparse
import h5py
import pandas as pd
from pyopls import OPLSValidator
import os
import sys
import numpy as np
from joblib import parallel_backend
print(' '.join(sys.argv))
def load_data(filename, group_key):
with h5py.File(filename, 'r') as file:
description_ = file[group_key].attrs['description']
        try:
            pos_label_ = file[group_key].attrs['pos_label']
        except KeyError:
            pos_label_ = None
        try:
            neg_label_ = file[group_key].attrs['neg_label']
        except KeyError:
            neg_label_ = None
return (pd.read_hdf(filename, f'{group_key}/X'), pd.read_hdf(filename, f'{group_key}/y'),
description_, pos_label_, neg_label_)
def serialize_opls(filename, validator_: OPLSValidator, name, description_, pos_label_, neg_label_, target,
feature_labels_):
significant_features = feature_labels_[validator_.feature_significance_]
with h5py.File(filename, 'a') as file:
group = file.create_group(name)
group.attrs['description'] = description_
group.attrs['pos_label'] = pos_label_ if pos_label_ is not None else ''
group.attrs['neg_label'] = neg_label_ if neg_label_ is not None else ''
group.attrs['k'] = validator_.k
group.attrs['n_permutations'] = validator_.n_permutations
group.attrs['n_inner_permutations'] = validator_.n_inner_permutations
group.attrs['n_outer_permutations'] = validator_.n_outer_permutations
group.attrs['inner_alpha'] = validator_.inner_alpha
group.attrs['outer_alpha'] = validator_.outer_alpha
group.attrs['n_components'] = validator_.n_components_
group.attrs['q_squared'] = validator_.q_squared_
group.attrs['q_squared_p_value'] = validator_.q_squared_p_value_
group.attrs['r_squared_Y'] = validator_.r_squared_Y_
group.attrs['r_squared_X'] = validator_.r_squared_X_
group.create_dataset('permutation_q_squared', data=validator_.permutation_q_squared_)
group.create_dataset('permutation_loadings', data=validator_.permutation_loadings_)
group.create_dataset('feature_p_values', data=validator_.feature_p_values_)
target_dtype = h5py.special_dtype(vlen=bytes) if target.dtype.type is np.object_ else target.dtype
group.create_dataset('target', data=target.to_numpy(), dtype=target_dtype)
group.create_dataset('index', data=target.index.to_numpy())
group.create_dataset('feature_labels', data=feature_labels_)
group.create_dataset('significant_features', data=significant_features)
opls_group = group.create_group('opls')
opls_group.create_dataset('W_ortho', data=validator_.opls_.W_ortho_)
opls_group.create_dataset('P_ortho', data=validator_.opls_.P_ortho_)
opls_group.create_dataset('T_ortho', data=validator_.opls_.T_ortho_)
pls_group = group.create_group('pls')
pls_group.create_dataset('x_weights', data=validator_.pls_.x_weights_)
pls_group.create_dataset('y_weights', data=validator_.pls_.y_weights_)
pls_group.create_dataset('x_loadings', data=validator_.pls_.x_loadings_)
pls_group.create_dataset('x_scores', data=validator_.pls_.x_scores_)
pls_group.create_dataset('y_scores', data=validator_.pls_.y_scores_)
pls_group.create_dataset('x_rotations', data=validator_.pls_.x_rotations_)
pls_group.create_dataset('y_rotations', data=validator_.pls_.y_rotations_)
pls_group.create_dataset('coef', data=validator_.pls_.coef_)
        pls_group.create_dataset('n_iter', data=validator_.pls_.n_iter_)
if validator_.accuracy_ is not None:
group['transformed_target'] = validator_.binarizer_.transform(target)
group.attrs['accuracy'] = validator_.accuracy_
group.attrs['accuracy_p_value'] = validator_.accuracy_p_value_
group.attrs['roc_auc'] = validator_.roc_auc_
group.attrs['roc_auc_p_value'] = validator_.roc_auc_p_value_
group.attrs['discriminant_q_squared'] = validator_.discriminant_q_squared_
group.attrs['discriminant_q_squared_p_value'] = validator_.discriminant_q_squared_p_value_
group.attrs['discriminant_r_squared'] = validator_.discriminant_r_squared_
group.create_dataset('permutation_accuracy', data=validator_.permutation_accuracy_)
group.create_dataset('permutation_roc_auc', data=validator_.permutation_roc_auc_)
group.create_dataset('permutation_discriminant_q_squared',
data=validator_.permutation_discriminant_q_squared_)
parser = argparse.ArgumentParser(description='Perform Orthogonal Projection to Latent Structures')
parser.add_argument('dataframe_filename', type=str,
help='HDF5 file containing two pandas DataFrames, "numeric_df" and "label_df".')
parser.add_argument('k', type=int, help='Number of cross-validation folds, -1 for leave-one-out.')
parser.add_argument('min_n_components', type=int, help='Minimum number of orthogonal components to remove.')
parser.add_argument('inner_test_alpha', type=float,
help='First significance threshold, values outside of this will be '
'tested for outer_test_permutations')
parser.add_argument('outer_test_alpha', type=float,
help='Second significance threshold, applied to values tested with outer_test_permutations.')
parser.add_argument('metric_test_permutations', type=int,
help='Number of permutations to perform to determine significance of metrics (like R-squared).')
parser.add_argument('inner_test_permutations', type=int,
help='Number of permutations to perform for all features.')
parser.add_argument('outer_test_permutations', type=int,
help='Number of permutations to perform for features deemed significant with inner_test_alpha.')
parser.add_argument('--force_regression', type=bool, default=False,
help='If True, treat numeric multiclass or binary variables as continuous variables.')
args = parser.parse_args()
group_keys = [key for key in h5py.File(args.dataframe_filename, 'r').keys()]
output_filename = os.path.splitext(os.path.basename(args.dataframe_filename))[0] + '_results.h5'
with h5py.File(output_filename, 'w') as out_file, h5py.File(args.dataframe_filename, 'r') as in_file:
if 'collection_id' in in_file.attrs:
out_file.attrs['input_collection_id'] = in_file.attrs['collection_id']
out_file.attrs.update({key: value for key, value in in_file.attrs.items() if key != 'collection_id'})
out_file.attrs['analysis_type'] = 'opls'
for key in group_keys:
X, y, description, pos_label, neg_label = load_data(args.dataframe_filename, key)
feature_labels = np.array([float(c) for c in X.columns])
print(description)
validator = OPLSValidator(args.min_n_components, args.k, False, args.force_regression,
args.metric_test_permutations, args.inner_test_permutations, args.outer_test_permutations,
args.inner_test_alpha, args.outer_test_alpha)
print(f'====== Fitting {key} ======')
with parallel_backend('threading'):
validator.fit(X, y, pos_label=pos_label, verbose=1)
serialize_opls(output_filename, validator, key, description, pos_label, neg_label, y, feature_labels)
| mit |
ovgarol/chaPulin9.0 | simPulsar/classesPulsar.py | 1 | 19879 | #!/usr/bin/env python
""" Tercera implementacion del codigo
Segunda modulariozacion
Datos obtenidos de atnfParameters
Segunda construccion completa
Debug astronomico completo: L max a 27 Jy kpc2
Indice espectral a -1.82
maximo brillo de burst 10e5 L mean
Ahora modela p1 para sacar el nivel de nulling en B,
basado en Bsurf=3.2e19*(p0*p1)^(1/2)<2 Gauss
solo para envejecer la poblacion y decir cuales no se verian
chaPulin_8.2
Primera revision con modelos completos
"""
import numpy as np
#import scipy as sp
#import time as tm
import matplotlib.pyplot as plt
from scipy import optimize, array, stats
from matplotlib import rc, rcParams
rc('text',usetex=True)
rc('font',**{'family':'serif','serif':['Computer Modern']})
###########################################################################################
# genericPulsar CLASS: STORES THE INFORMATION OF THE PULSARS OF A GALAXY
###########################################################################################
class genericPulsar(object):
def __init__(self, name, logP, logQ, age, p2, p3): # BASE NATURAL
"""Input: Caracteristicas galaxyales P, Q, L, W, age (name es el no. de iteracion!!)
"""
self.spectral_index = np.random.normal(-1.8,0.2) # correccion por banda de observacion SpectralIndex_Maron
self.name = int(name) # entero solo para id
self.sigma = 0 # este numero va a ser sigmaN, sigmaLN o alpha segun toque
self.pulses = [] # vector vacio donde van los pulsos
self.maxburst = np.random.normal(100,10000)
##########################################################
        # A first aging model, TAKEN FROM GUSINOV              #
T0 = float(np.exp(logP)) # periodo inicial al tiempo tc (s)
T1 = float(np.exp(logQ)) # primera derivada inicial (s/s)
tc = T0/(2*T1)/(3.6*2.4*3.6e6) # edad caracteristica (yr)
age = age - 1.e6*np.random.uniform(7.,22.) # Consideracion del tiempo de secuencia principal
tm = np.random.uniform(5e6,5e7) # tiempo de evolucion (yr)
if age > 0.0 and age > tc:
B = np.sqrt(T0*T1)*np.exp(-2*(age-tc)/tm) # decaimiento segun gusinov
T0 = np.sqrt(T0**2+0.5*(3.6*2.4*3.6e6)*tm*T0*T1*(1-np.exp(-2*(age-tc)/tm)))
T1 = B**2/T0
elif age < 0.0:
T0 = 0.0
T1 = 0.0
##########################################################
if T1 != 0.0:
logQcrit = 2.0*np.log10(T0)-16.52 + 1 + np.random.normal(0,0.5/2) # deathline segun BING ZHAN
if np.log10(T1) > logQcrit and not np.isinf(T1):
self.P = T0 # s
self.Q = T1 # adim
# brillo promedio mas grande permitido valor del atnf 26100 mJy
self.L = float(np.exp(log_Brillo(self.P,p2[0],p2[1],p2[2])))
while self.L > 27: self.L = float(np.exp(log_Brillo(self.P,p2[0],p2[1],p2[2])))
self.W = float(np.exp(log_W(self.P,p3[0],p3[1],p3[2]))) # s
self.dist = np.random.randint(0,3) # entero 0,1,2--aleatorio
if self.dist == 0: self.sigma = np.random.uniform(1,100) #normal
elif self.dist == 1: self.sigma = np.random.uniform(1.01,100) #lognormal
elif self.dist == 2: self.sigma = np.random.uniform(0,3) #powerlaw
else: # esta debajo de la dead line
self.P = 0.0 # s
self.Q = 0.0 # adim
self.dist = 'D' # invisible
self.L = 0.0 # Jk kpc^2
self.W = 0.0 # s
else: # el pulsar no ha nacido
self.P = 0.0 # s
self.Q = 0.0 # adim
self.dist = 'D' # invisible
self.L = 0.0 # Jk kpc^2
self.W = 0.0 # s
def observeIt(self, t):
maxBurst = self.maxburst * self.L
if self.dist == 'D':# invisible
pulses = [0] # si es invisible no emite pulsos
else:
maxT = int(t*3600/self.P) # numero de iteraciones que depende del periodo
pulses = [0]*maxT # estoy generando una lista de longitud maxT llena de ceros
if self.dist == 0: # distribucion normal
i=0
while i < maxT:
x = np.random.normal(self.L,self.sigma)
if(x>0) and x/self.L <= maxBurst:
pulses[i] = x
i = i + 1
if self.dist == 1: # distribucion lognormal
i=0
while i < maxT:
z = pulses[i] = np.random.lognormal(np.log(self.L)-0.5*np.log(self.sigma)**2,np.log(self.sigma))
if(z/self.L <= maxBurst): #se reducen los picos hasta 10^5 el promedio
pulses[i] = z
i = i + 1
if self.dist == 2: # distribucion de powerlaw
i=0
while i < maxT:
y = self.L*self.sigma/(self.sigma+1) + np.random.pareto(self.sigma)
if(y/self.L <= maxBurst): #se reducen los picos hasta 10^5 el promedio
pulses[i] = y
i = i + 1
self.pulses = pulses
return pulses
def pulseIt(self):
maxBurst = self.maxburst*self.L
if self.dist == 'D': #invisible
return 0
if self.dist == 0: #normal
x = np.random.normal(self.L,self.sigma)
while x < 0 or x/self.L > maxBurst:
x = np.random.normal(self.L,self.sigma)
return x
if self.dist == 1: #lognormal
z = np.random.lognormal(np.log(self.L)-0.5*np.log(self.sigma)**2,np.log(self.sigma))
while z/self.L > maxBurst: #se reducen los picos hasta 10^5 el promedio
z = np.random.lognormal(np.log(self.L)-0.5*np.log(self.sigma)**2,np.log(self.sigma))
return z
if self.dist == 2: #powerlaw
y = self.L*self.sigma/(self.sigma+1) + np.random.pareto(self.sigma)
while y/self.L > maxBurst: #se reducen los picos hasta 10^5 el promedio
y = self.L*self.sigma/(self.sigma+1) + np.random.pareto(self.sigma)
return y
def showIt(self): # Exporta los datos del objeto pulsar a una lista, para extraer datos
return self.name, self.P, self.Q, self.L, self.W, self.dist, self.sigma
###########################################################################################
# PRELIMINARY DEFINITIONS
###########################################################################################
def log_Brillo(x,D,E,sigmaL):
return D*x + E + np.random.normal(0,sigmaL)
def log_W(x,F,G,sigmaW):
#return 1.06*np.log(F*np.exp(x)**0.9+G)+ np.random.normal(0,sigmaW)
return G*x + F + np.random.normal(0,sigmaW)
###########################################################################################
# genericModel CLASS: HOLDS THE PULSARS OF A MODEL GALAXY
###########################################################################################
class genericModel(object):
def __init__(self, name, nTot, distance, p1, p2, p3, p4, age):
self.name = name # solo para id
self.nTot = int(nTot) # Cantidad de objetos
self.distance = float(distance) # distancia de la galaxia
self.p1 = p1 # modelo para distribucion de P0
self.p2 = p2 # modelo para distribucion de L(P)
self.p3 = p3 # modelo para distribucion de W(P)
self.p4 = p4 # modelo para distribucion de P1
self.pulsar = [0]*self.nTot # Espacio para los pulsares del tamano requerido para no fragmentar
for i in range(self.nTot):
x = np.random.normal(p1[1],p1[2]) # aplicacion del modelo de P0
q = np.random.normal(p4[1],p4[2]) # aplicacion del modelo de P1
self.pulsar[i]=(genericPulsar(i,x,q,age,self.p2,self.p3)) # crea los pulsares
del x, q, p1, p2, p3, p4, name, age, nTot, distance
def graficar(self): # devuelve los valores de cada pulsar individual
p=[]
q=[]
l=[]
w=[]
b=[]
D=[]
sigma=[]
for i in range(self.nTot): # BASE 10
D.append(self.pulsar[i].dist)
p.append(np.log10(self.pulsar[i].P))
l.append(np.log10(self.pulsar[i].L))
w.append(np.log10(self.pulsar[i].W))
q.append(np.log10(self.pulsar[i].Q))
sigma.append(self.pulsar[i].sigma)
b.append(np.log10(3.2e19*(self.pulsar[i].P*self.pulsar[i].Q)**(1./2.)))
return p, q, l, w, b, D, sigma
###########################################################################################
# SFHModel CLASS: the SFH is treated as a group of bursts
###########################################################################################
class SFHModel(object):
def __init__(self, name, distance, p1, p2, p3, p4, SFH):
        # THE SFH IS GIVEN AS A LIST OF (AGE, EXPECTED NUMBER OF PULSARS)
self.name = name # solo para id
self.distance = float(distance) # distancia de la galaxia
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.p4 = p4
        age = SFH[0]                        # age of each burst
        cuantos = SFH[1]                    # expected number of pulsars per burst
        self.nTot = array(cuantos).sum()    # total number of pulsars
        J = 0
        N = 0
        for i in range(len(cuantos)): N = N + cuantos[i]
        # allocate the pulsar list only after N is known (it was previously used before assignment)
        self.pulsar = [0]*N                 # space for the pulsars, sized up front to avoid fragmenting
for i in range(len(cuantos)):
for j in range(int(cuantos[i])):
x = np.random.normal(p1[1],p1[2])
q = np.random.normal(p4[1],p4[2])
self.pulsar[j+J]=(genericPulsar(j+J ,x,q,age[i],self.p2,self.p3)) # crea el pulsar
J = J + int(cuantos[i])
j=0
del q, x, p1, p2, p3, p4, name, SFH, cuantos, distance
def graficar(self):
p=[]
q=[]
l=[]
w=[]
b=[]
D=[]
sigma=[]
for i in range(self.nTot): # BASE 10
D.append(self.pulsar[i].dist)
p.append(np.log10(self.pulsar[i].P))
l.append(np.log10(self.pulsar[i].L))
w.append(np.log10(self.pulsar[i].W))
q.append(np.log10(self.pulsar[i].Q))
sigma.append(self.pulsar[i].sigma)
b.append(np.log10(3.2e19*(self.pulsar[i].P*self.pulsar[i].Q)**(1./2.)))
return p, q, l, w, b, D, sigma
###########################################################################################
# CompletModel CLASS: takes the SFH, the IMF and the metallicity into account
###########################################################################################
class CompletModel(object):
def __init__(self, name, distance, p1, p2, p3, p4, SFH, index):
        # THE SFH IS GIVEN AS A LIST OF (AGE, SFR, Z, burst_width)
age = SFH[0] # edad de cada burst (yr)
sfr = SFH[1] # star formation rate (m_sol/yr)
metal = SFH[2] # [Z]
width = SFH[3] # ancho del burst (yr)
Mtotal = 0.0 # masa estelar total M_star (m_sol)
cuantos = [0]*len(sfr) # numero de bursts
Mmin = [0]*len(sfr) # masa minima para un SNeII (m_sol)
Mmax = [0]*len(sfr) # masa minima para un SNeII (m_sol)
Mu = 100.0 # masa maxima en la galaxia (m_sun)
self.index = index
self.name = name # solo para id
self.distance = float(distance) # distancia de la galaxia
self.p1 = p1 # igual que el anterior
self.p2 = p2
self.p3 = p3
self.p4 = p4
#### parte anulada calculo INCORRECTO
"""
Z0 = 3.55e-5
Z1 = 1.404e-4
Z2 = 1.3614e-3
Z3 = -1.699
for i in range(len(sfr)):
m = np.log10(Z0*10**metal[i]/(Z1*10**metal[i]+Z2))-Z3 # reescalar Z en cuncion de [Fe/H]
Mmin[i] = 9.40858+1.14548*m+3.96e-1*m**2+2.96e-2*m**3-8.79e-3*m**4-1.96e-3*m**5-1.12e-4*m**6
Mtotal = Mtotal + sfr[i]*width[i]
"""#####
for i in range(len(sfr)):
if metal[i] > 0.0: Mmax[i] = Mu
else: Mmax[i] = 25.0
m = metal[i]
Mmin[i] = 9.40858+1.14548*m+3.96e-1*m**2+2.96e-2*m**3-8.79e-3*m**4-1.96e-3*m**5-1.12e-4*m**6
Mtotal = Mtotal + sfr[i]*width[i]
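        # Note (reader's interpretation, not from the original author): the block
        # below appears to normalise a three-segment broken power-law IMF with
        # Kroupa-like slopes (~0.3, 1.3, 2.3) and breaks at 0.08 and 0.5 M_sun;
        # C0, C1 and K3 are the resulting normalisation constants over [0.01, Mu] M_sun.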
a0 = np.random.normal(0.3,0.7/2)
a1 = np.random.normal(1.3,0.5/2)
a2 = np.random.normal(2.3,0.3/2)
C0 = (0.08**(2-a0)-0.01**(2-a0))/(2.-a0)+(0.5**(2-a1)-0.08**(2-a1))*0.08**(a1-a0)/(2.-a1)+(1.0**(2-a2)-0.5**(2-a2))*0.08**(a1-a0)*0.5**(a2-a1)/(2.-a2) # c_0 parametro de la imf
C1 = 0.08**(a1-a0)*0.5**(a2-a1)*1.**(index-a2) # c_1 parametro de la imf
K3 =C1/(C0+C1/(2-index)*(Mu**(2-index)-1)) # parametro de la imf
self.C0 =C0
self.C1 =C1
for i in range(len(sfr)):
cuantos[i] = int(1./6.*K3*sfr[i]*width[i]*(Mmax[i]**(1-index)-Mmin[i]**(1-index))/(1-index)) # aplicacion del modelo de la imf
self.nTot = array(cuantos).sum() # numero total de pulsares
J = 0
N = 0
for i in range(len(cuantos)): N = N + cuantos[i]
self.pulsar = [0]*N # Espacio para los pulsares del tamano requerido para no fragmentar
# imprime los resultados del modelo imf-sfh-metalicidad
print "age, sfr, metal, width, Mmin, cuantos"
for i in range(len(cuantos)):
print age[i], sfr[i], metal[i], width[i], Mmin[i], cuantos[i]
#print C0,C1,K3
print 'Masa total: ', Mtotal, 'En total: ', self.nTot, ' pulsares', 'Indice: ', index
for i in range(len(cuantos)):
for j in range(int(cuantos[i])):
x = np.random.normal(p1[1],p1[2]) # aplicacion del modelo para P0
q = np.random.normal(p4[1],p4[2]) # aplicacion del modelo para P1
age_i = age[i]+np.random.uniform(-width[i]/2,width[i]/2) # distribuye el nacimiento en el ancho del burst
self.pulsar[j+J]=(genericPulsar(j+J ,x,q,age_i,self.p2,self.p3)) # crea el pulsar
J = J + int(cuantos[i])
j=0
def graficar(self):
p=[]
q=[]
l=[]
w=[]
b=[]
D=[]
sigma=[]
for i in range(self.nTot): # BASE 10
D.append(self.pulsar[i].dist)
p.append(np.log10(self.pulsar[i].P))
l.append(np.log10(self.pulsar[i].L))
w.append(np.log10(self.pulsar[i].W))
q.append(np.log10(self.pulsar[i].Q))
sigma.append(self.pulsar[i].sigma)
b.append(np.log10(3.2e19*(self.pulsar[i].P*self.pulsar[i].Q)**(1./2.)))
return p, q, l, w, b, D, sigma
def TotalPulsares(self):
return self.nTot
def showParametros(self):
return self.index, self.C0, self.C1, self.nTot
###########################################################################################
# genericTelescope CLASS: THIS IS WHERE THE PULSARS ARE OBSERVED
###########################################################################################
class genericTelescope(object):
def __init__(self, name, G, beta, np, deltaF, Tsys, f_obs):
"""Input: Caracteristicas fisicas del RT
"""
self.name = int(name) # entero solo para id
self.G = float(G) # K/Jk
self.beta = float(beta) # adim
self.np = float(np) # adim
self.deltaF = float(deltaF) # Hz (s^-1)
self.Tsys = float(Tsys) # K
self.f_obs = float(f_obs) # MHz frecuencia de observacion nuevo para chaPulin_3.6
def periodicDetect(self, SN, galaxy, time):
d = galaxy.distance
Detected = [] # pulsares detectados
for i in range(len(galaxy.pulsar)):
spectral_index = galaxy.pulsar[i].spectral_index
tipo = galaxy.pulsar[i].dist
if tipo == 'D':
Detected.append(0)
else:
p = galaxy.pulsar[i].P
w = galaxy.pulsar[i].W
l = galaxy.pulsar[i].L*(self.f_obs/400)**spectral_index # correccion por banda de observacion
if (p > w):
Lmin = d**2*self.beta*SN*self.Tsys/(self.G*np.sqrt(self.np * self.deltaF \
* time * 3600))*np.sqrt(w/(p-w)) # ecuacion de la S/N
if (Lmin < l): Detected.append(1)
else: Detected.append(0)
else: Detected.append(0)
return Detected
def pulseDetect(self, SN, galaxy, time):
d = galaxy.distance
giantPulses = []
xx = 0
for i in range(len(galaxy.pulsar)):
spectral_index = galaxy.pulsar[i].spectral_index
tipo = galaxy.pulsar[i].dist
if tipo == 'D':
giantPulses.append(0)
else:
p = galaxy.pulsar[i].P
w = galaxy.pulsar[i].W
l = galaxy.pulsar[i].L*(self.f_obs/400)**spectral_index # correccion por banda de observacion
Lmin = d**2*SN*self.Tsys/(self.G*np.sqrt(self.np*self.deltaF*w)) # ecuacion de la S/N
# mejora para liberar memoria
if galaxy.pulsar[i].maxburst*l < Lmin:
giantPulses.append(0)
else:
j = 0
nn = int(3600*time/galaxy.pulsar[i].P)
while xx == 0 and j < nn:
if Lmin < galaxy.pulsar[i].pulseIt(): xx = 1
j = j+1
giantPulses.append(xx)
xx = 0
return giantPulses
def pulsarDetect(self, SN, galaxy, time):
Periodicos = self.periodicDetect(SN, galaxy, time)
Pulsos = self.pulseDetect(SN, galaxy, time)
Total = [0]*len(Pulsos)
for i in range(len(Pulsos)):
if Periodicos[i] ==1 or Pulsos[i]==1: Total[i] = 1
p, q, l, w, b, D, sigma = galaxy.graficar()
return p, q, l, w, b, D, sigma, Periodicos, Pulsos, Total
###########################################################################################
# UTILITY FUNCTIONS
###########################################################################################
def pulseHist(pulsar,time,ID): # plots the pulse histogram
pulsar.observeIt(time)
plt.clf()
plt.suptitle(r'Tipo '+ repr(pulsar.dist)+', $L_{media}$ = ' + repr(pulsar.L)+ ' [Jy kpc$^2$]')
plt.hist(pulsar.pulses,30,color='black',histtype='step',lw='2')
#, histtype='step', histtype='bar',log='True',normed='False',log='True'
plt.xlabel(r'$L$ [Jy kpc$^{2}$]')
plt.ylabel(r'$N_{Pulsos}$')
plt.xscale('linear')
plt.yscale('symlog')
plt.grid(True)
plt.savefig('out/img/pulsar_'+ repr(pulsar.name)+'_'+ID+'.png')
plt.clf()
| agpl-3.0 |
OpringaoDoTurno/airflow | airflow/hooks/base_hook.py | 8 | 2969 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(LoggingMixin):
"""
    Abstract base class for hooks. Hooks are meant as an interface to
    interact with external systems: MySqlHook, HiveHook and PigHook return
    objects that can handle the connection and interaction with specific
    instances of these systems, and expose consistent methods to interact
    with them.
"""
def __init__(self, source):
pass
@classmethod
@provide_session
def _get_connections_from_db(cls, conn_id, session=None):
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
session.expunge_all()
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
return db
@classmethod
def _get_connection_from_env(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
return conn
@classmethod
def get_connections(cls, conn_id):
conn = cls._get_connection_from_env(conn_id)
if conn:
conns = [conn]
else:
conns = cls._get_connections_from_db(conn_id)
return conns
@classmethod
def get_connection(cls, conn_id):
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
log = LoggingMixin().log
log.info("Using connection to: %s", conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
| apache-2.0 |
c-owens/ems-api-sdk | python/query/tsquery.py | 1 | 5965 | from emspy.query import *
from query import Query
import sys
import cPickle
import warnings
import pandas as pd
import numpy as np
class TSeriesQuery(Query):
def __init__(self, conn, ems_name, new_data = False):
Query.__init__(self, conn, ems_name)
self._init_assets(new_data)
self.reset()
def _init_assets(self, new_data):
# Query._init_assets(self)
self.__analytic = Analytic(self._conn, self._ems_id, new_data)
def reset(self):
self.__columns = list()
self.__queryset = dict()
def select(self, *args):
keywords = args
save_table = False
for kw in keywords:
# Get the param from param table
prm = self.__analytic.get_param(kw)
if prm['id'] == "":
# If the param's not found, call EMS API
res_df = self.__analytic.search_param(kw, in_dataframe = True)
# The first one is with the shortest name string. Pick that.
prm = res_df.iloc[0,:].to_dict()
# Add the new parameters to the param table for later uses
self.__analytic._param_table = self.__analytic._param_table.append(res_df, ignore_index = True)
save_table = True
self.__columns.append(prm)
if save_table:
self.__analytic._save_paramtable()
def range(self, start, end):
self.__queryset['start'] = start
self.__queryset['end'] = end
def timepoint(self, tpoint):
if type(tpoint) == np.ndarray:
tpoint = tpoint.tolist()
self.__queryset['offsets'] = tpoint
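    # Reader's note: run() below queries one selected analytic at a time for the
    # given flight record and outer-merges each returned offset/value series into
    # a single DataFrame keyed on "Time (sec)".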
def run(self, flight, start = None, end = None, timestep = 1.0, timepoint = None):
if start is None:
start = 0.0
if end is None:
end = self.flight_duration(flight)
if timestep is not None:
timepoint = np.arange(start, end, timestep)
for i, p in enumerate(self.__columns):
# Print what is going on:
print "\r\x1b[K%d/%d: %s" % (i+1, len(self.__columns), p['name']),
if timepoint is not None:
self.timepoint(timepoint)
else:
self.range(start, end)
q = self.__queryset.copy()
q['select'] = [{'analyticId': p['id']}]
resp_h, content = self._conn.request( uri_keys = ("analytic", "query"),
uri_args = (self._ems_id, flight),
jsondata = q)
if content.has_key('message'):
sys.exit('API query for flight %d, parameter = "%s" was unsuccessful.\nHere is the message from API: %s' % (flight, p['name'], content['message']))
if i == 0:
df = pd.DataFrame({"Time (sec)": content['offsets']})
df[p['name']] = content['results'][0]['values']
else:
df1 = pd.DataFrame({"Time (sec)": content['offsets']})
df1[p['name']] = content['results'][0]['values']
df = pd.merge(df, df1, how = "outer", on="Time (sec)", sort= True)
print "\r\x1b[K",
return df
def multi_run(self, flight, start = None, end = None, timestep=1.0, timepoint = None, save_file = None):
res = list()
attr_flag = False
if isinstance(flight, pd.DataFrame):
FR = flight["Flight Record"]
attr_flag = True
else:
FR = flight
# param processing
if start is None: start = [None]*len(FR)
if end is None: end = [None]*len(FR)
if not hasattr(timestep, "__len__"):
timestep = [timestep]*len(FR)
if timepoint is None:
timepoint = [None]*len(FR)
else:
warnings.warn("Time points are not yet supported. The given time points will be ignored.")
timepoint = [None]*len(FR)
print('\n=== Start running time-series data querying for %d flights ===\n' % len(FR))
for i, fr in enumerate(FR):
print '%d / %d: FR %d' % (i+1, len(FR), fr)
i_res = dict()
if attr_flag:
i_res['flt_data'] = flight.iloc[i,:].to_dict()
else:
i_res['flt_data'] = {'Flight Record': fr}
i_res['ts_data'] = self.run(fr, start[i], end[i], timepoint[i])
res.append(i_res)
if save_file is not None:
cPickle.dump(res, open(save_file, 'wb'))
return res
def flight_duration(self, flight, unit = "second"):
p = self.__analytic.get_param("hours of data (hours)")
if p["id"] == "":
res_df = self.__analytic.search_param("hours of data (hours)", in_dataframe = True)
p = res_df.iloc[0].to_dict()
self.__analytic._param_table = self.__analytic._param_table.append(res_df, ignore_index = True)
self.__analytic._save_paramtable()
q = {
"select": [{"analyticId": p["id"]}],
"size": 1
}
resp_h, content = self._conn.request( uri_keys = ("analytic", "query"),
uri_args = (self._ems_id, flight),
jsondata = q)
if content.has_key('message'):
sys.exit('API query for flight %d, parameter = "%s" was unsuccessful.\nHere is the message from API: %s' % (flight, p['name'], content['message']))
fl_len = content['results'][0]['values'][0]
if unit == "second":
t = fl_len * 60 * 60
elif unit == "minute":
t = fl_len * 60
elif unit == "hour":
t = fl_len
else:
sys.exit("Unrecognizable time unit (%s)." % unit)
return t
| mit |
TiKunze/CanMics | src/python/01_SingleChannel/2pop/Nii/Auswertung_HeHi_2nd.py | 2 | 12930 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 17:15:03 2015
@author: Tim Kunze
Copyright (C) 2015, Tim Kunze. All rights reserved.
This script is a modified version of the RUM Detector:
instead of sweeping over He and Hi in every diagram, we sweep over lenge and intensity of the impulse (as in the actiation plot)
"""
###############################################################################
#
# Imports
#
###############################################################################
import numpy as np
import sys
import scipy as sc
import os # to enable some C commands (cwd,listdir)
root='/home/raid3/tkunze/Documents/Programming/JansenModels/Kunze_JuR/003_ModelDevelopment/001_Unifying_Framework/EIPy_StateSpaceExploration/'
currpath = 'EI_Cluster/Data/2nd/2pop/2popNpp_HeHi_full1'
root='/home/raid3/tkunze/Documents/Programming/JansenModels/Kunze_JuR/003_ModelDevelopment/001_Unifying_Framework/EIPy_StateSpaceExploration/'
currpath = 'EI_Cluster/Data/2nd/3pop/full_2'
os.chdir(root+currpath)
###############################################################################
#
# Function definitions
#
###############################################################################
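# Reader's note: cleargrid2 maps the three measurements stored per parameter point
# (index 0: base level before the pulse, 1: final level, 2: temporary/maximum level)
# onto the discrete codes 1-5 used for the colour-coded state diagrams below; the
# 0.004 threshold presumably separates the low state from the high state.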
def cleargrid2(state_grid):
[x,y,z]=np.shape(state_grid)
out_grid=np.zeros((x,y))
for i in range(x):
for j in range(y):
if state_grid[i,j,0] > 0.004: # (1) Region was up
if state_grid[i,j,1] > 0.004: # (3) Region is up
out_grid[i,j] = 5 # was up, (stayed up) and is now up
elif state_grid[i,j,1] < 0.004: # (3) Region is now low
if state_grid[i,j,2] > 0.004: # (2) Region was temporary up
out_grid[i,j] = 4 # was up, was temporary up and is now low
elif state_grid[i,j,2] < 0.004: # (2) Region wasn't temporary up
raise ValueError('1') # was up, wasn't temporary up and is now low
elif state_grid[i,j,0] < 0.004: # (1) Region was low
if state_grid[i,j,1] > 0.004: # (3) Region is up
out_grid[i,j] = 3 # was low, (became up) and is now up
elif state_grid[i,j,1] < 0.004: # (3) Region is now low
if state_grid[i,j,2] > 0.004: # (2) Region was temporary up
out_grid[i,j] = 2 # was low, was temporary up and is now low
elif state_grid[i,j,2] < 0.004: # (2) Region wasn't temporary up
out_grid[i,j] = 1 # was low and stayed permantly low
else:
raise ValueError('2')
return out_grid
def cleargrid(state_grid):
[x,y,z]=np.shape(state_grid)
out_grid=np.zeros((x,y))
for i in range(x):
for j in range(y):
if state_grid[i,j,1] > 0.004:
out_grid[i,j] = 3 # permantly up
elif state_grid[i,j,1] < 0.004:
if state_grid[i,j,2] > 0.004+state_grid[i,j,0]:
out_grid[i,j] = 2.1 # temporary up
#print "tempo"
else:
out_grid[i,j] = 1
else:
raise ValueError('Error')
print "ERROR"
return out_grid
#
def classifygrid(state_grid):
[x,y,z]=np.shape(state_grid)
sectiongrid=np.zeros((x,y))
for i in range(x):
for j in range(y):
if state_grid[i,j,2] > 0.004:
if state_grid[i,j,1] > 0.004:
sectiongrid[i,j] = 3.0 #permanent state
else:
sectiongrid[i,j] = 2.1 #temporal state
elif state_grid[i,j,2] < 0.004:
sectiongrid[i,j] = 1 #low state
else:
raise ValueError('Error')
print "ERROR"
return sectiongrid
###############################################################################
#
# Analysis
#
###############################################################################
# per grid point: 1: baselevel, 2: endlevel, 3: max()
from matplotlib import cm
import matplotlib.pyplot as plt
###full
#hirange = np.arange(2,8.1,0.5)*1e-3
#herange = np.arange(7.,0.99,-0.5)*1e-3
#length_range = np.arange(500,1501,10)
#intensity_range = np.arange(250,69,-2)
#full2
hirange = np.arange(10,26,1)*1e-3
herange = np.arange(7.,2.49,-0.25)*1e-3
length_range = np.arange(500,1501,10)
intensity_range = np.arange(250,49,-2)
low_lenge=np.min(length_range)
high_lenge=np.max(length_range)
low_inte=np.min(intensity_range)
high_inte=np.max(intensity_range)
#%%
state_grid=np.zeros((len(herange),len(hirange),len(intensity_range),len(length_range),3))
i=0
j=0
for he in herange:
j=0
for hi in hirange:
#state_grid[i,j,:,:,:]=np.load('RUM_Dec_meas_Npp100_HeHi_le500t1500i10msInt50t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000))
state_grid[i,j,:,:,:]=np.load('RUM_Dec_meas_full2_le500t1500i10msInt50t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000))
print "he:%.2f | hi:%.2f " %(he*1000,hi*1000)
j=j+1
i=i+1
glob_low_val_max=np.min(state_grid[:,:,:,:,2])
glob_high_val_max=np.max(state_grid[:,:,:,:,2])
glob_low_val_dete=np.min(state_grid[:,:,:,:,1])
glob_high_val_dete=np.max(state_grid[:,:,:,:,1])
glob_low_val_base=np.min(state_grid[:,:,:,:,0])
glob_high_val_base=np.max(state_grid[:,:,:,:,0])
scale_base=np.zeros((2,2))
scale_base[0]=glob_low_val_base
scale_base[1]=glob_high_val_base
scale_detec=np.zeros((2,2))
scale_detec[0]=glob_low_val_dete
scale_detec[1]=glob_high_val_dete
scale_max=np.zeros((2,2))
scale_max[0]=glob_low_val_max
scale_max[1]=glob_high_val_max
scale_colorcode=np.array([[5,5],[4,4],[3,3],[2,2],[1,1]])
fig=plt.figure(100)
plt.clf()
plt.subplot(141)
plt.imshow(scale_base, aspect='auto', interpolation='none')
plt.colorbar()
plt.title('scale base level')
plt.subplot(142)
plt.imshow(scale_detec, aspect='auto', interpolation='none')
plt.colorbar()
plt.title('scale detector level')
plt.subplot(143)
plt.imshow(scale_max, aspect='auto', interpolation='none')
plt.colorbar()
plt.title('scale max level')
plt.subplot(144)
plt.imshow(scale_colorcode, aspect='auto', interpolation='none',cmap=cm.Accent)
plt.colorbar()
plt.title('scale color code')
#%%
plt.figure(1)
plt.clf()
plt.figure(2)
plt.clf()
plt.figure(3)
plt.clf()
plt.figure(4)
plt.clf()
plt.figure(5)
plt.clf()
i=0
j=0
n=1
for he in herange:
j=0
for hi in hirange:
print "he:%.2f | hi:%.2f " %(he*1000,hi*1000)
# if np.min(state_grid[i,j,:,:,2]) < glob_low_val_max:
# glob_low_val_max=np.min(state_grid[i,j,:,:,2])
# print he,hi,glob_low_val_max
# if np.max(state_grid[i,j,:,:,2]) > glob_high_val_max:
# glob_high_val_max=np.max(state_grid[i,j,:,:,2])
# print he,hi,glob_high_val_max,1
#
# if np.min(state_grid[i,j,:,:,1]) < glob_low_val_dete:
# glob_low_val_dete=np.min(state_grid[i,j,:,:,1])
# print he,hi,glob_low_val_dete
# if np.max(state_grid[i,j,:,:,1]) > glob_high_val_dete:
# glob_high_val_dete=np.max(state_grid[i,j,:,:,1])
# print he,hi,glob_high_val_dete,1
# if np.min(state_grid[i,j,:,:,0]) < glob_low_val_base:
# glob_low_val_base=np.min(state_grid[i,j,:,:,0])
# print he,hi,glob_low_val_base
# if np.max(state_grid[i,j,:,:,0]) > glob_high_val_base:
# glob_high_val_base=np.max(state_grid[i,j,:,:,0])
# print he,hi,glob_high_val_base,1
#Visualize parameter ranges
plt.figure(1)
plt.subplot(len(herange),len(hirange),n)
plt.text(0.5, 0.5, "'he:%.2f,hi:%.2f,#:%.0f" %(he*1000,hi*1000,n), size=8, rotation=0.,
ha="center", va="center",
bbox=dict(boxstyle="square", ec=(1., 1., 1.), fc=(1., 1., 1.), ))
a=plt.gca()
a.axes.set_xticklabels([])
a.axes.set_yticklabels([])
#Visualize base level
plt.figure(2)
plt.subplot(len(herange),len(hirange),n)
current_grid=state_grid[i,j,:,:,:]
current_grid[-1,-1]=glob_low_val_base
current_grid[-1,-2]=glob_high_val_base
plt.imshow(np.flipud(current_grid[:,:,0]), aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none',cmap=cm.Accent)
a=plt.gca()
a.axes.set_xticklabels([])
a.axes.set_yticklabels([])
#Visualize detector level
plt.figure(3)
plt.subplot(len(herange),len(hirange),n)
current_grid=state_grid[i,j,:,:,:]
current_grid[-1,-1]=glob_low_val_dete
current_grid[-1,-2]=glob_high_val_dete
plt.imshow(np.flipud(current_grid[:,:,1]), aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none',cmap=cm.Accent)
a=plt.gca()
a.axes.set_xticklabels([])
a.axes.set_yticklabels([])
plt.figure(4)
plt.subplot(len(herange),len(hirange),n)
current_grid=state_grid[i,j,:,:,:]
current_grid[-1,-1]=glob_low_val_max
current_grid[-1,-2]=glob_high_val_max
plt.imshow(np.flipud(current_grid[:,:,2]), aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none',cmap=cm.Accent)
a=plt.gca()
a.axes.set_xticklabels([])
a.axes.set_yticklabels([])
# for Visualization of the sectioned values
plt.figure(5)
plt.subplot(len(herange),len(hirange),n)
current_grid=cleargrid2(state_grid[i,j,:,:,:])
current_grid[-1,-1]=5
current_grid[1,2]=1
plt.imshow(np.flipud(current_grid[:,:]), aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none',cmap=cm.Accent)
a=plt.gca()
a.axes.set_xticklabels([])
a.axes.set_yticklabels([])
j=j+1
n=n+1
if n==3000:
raise ValueError('dd')
i=i+1
plt.figure(2)
plt.title('Baselevel')
plt.tight_layout()
plt.figure(3)
plt.title('Detectorlevel')
plt.tight_layout()
plt.figure(4)
plt.title('Maximum Values')
plt.tight_layout()
plt.figure(5)
plt.tight_layout()
#cb=plt.colorbar()
#plt.ylabel('intensity')
#plt.xlabel('length')
print "fertig"
#%%
he=7e-3
hi=3.5e-3
a=np.load('RUM_Dec_meas_Npp100_HeHi_le500t1500i10msInt50t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000))
base=np.flipud(a[:,:,0])
base[-1,-1]=glob_low_val_base
base[-1,-2]=glob_high_val_base
detector=np.flipud(a[:,:,1])
detector[-1,-1]=glob_low_val_dete
detector[-1,-2]=glob_high_val_dete
maxlevel=np.flipud(a[:,:,2])
maxlevel[-1,-1]=glob_low_val_max
maxlevel[-1,-2]=glob_high_val_max
plt.figure(14)
plt.clf()
plt.imshow(base, aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none')
plt.colorbar()
plt.title('baselevel, he%.1f:, hi:%.1f' %(he*1000,hi*1000))
plt.figure(15)
plt.clf()
plt.imshow(detector, aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none')
plt.colorbar()
plt.title('detectorlevel, he: %.1f, hi: %.1f' %(he*1000,hi*1000))
plt.figure(16)
plt.clf()
plt.imshow(maxlevel, aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none')
plt.colorbar()
plt.title('maxlevel, he: %.1f, hi: %.1f' %(he*1000,hi*1000))
#%%
currpath = '/home/raid3/tkunze/Documents/Programming/JansenModels/Kunze_JuR/003_ModelDevelopment/001_Unifying_Framework/EIPy_StateSpaceExploration/EI_Cluster/Figures'
os.chdir(currpath)
plt.figure(1)
plt.savefig('RUM_Detektor_2popNpp_1_axis.pdf', format='pdf', dpi=1000)
plt.figure(2)
plt.savefig('RUM_Detektor_2popNpp_1_baselevel.pdf', format='pdf', dpi=1000)
plt.figure(3)
plt.savefig('RUM_Detektor_2popNpp_1_detectorlevel.pdf', format='pdf', dpi=1000)
plt.figure(4)
plt.savefig('RUM_Detektor_2popNpp_1_maxlevel.pdf', format='pdf', dpi=1000)
plt.figure(5)
plt.savefig('RUM_Detektor_2popNpp_1_sectioned.pdf', format='pdf', dpi=1000)
plt.figure(14)
plt.savefig('RUM_Detektor_2popNpp_1_baselevel_single.pdf', format='pdf', dpi=1000)
plt.figure(15)
plt.savefig('RUM_Detektor_2popNpp_1_detelevel_single.pdf', format='pdf', dpi=1000)
plt.figure(16)
plt.savefig('RUM_Detektor_2popNpp_1_maxlevel_single.pdf', format='pdf', dpi=1000)
#plt.close('all')
| gpl-3.0 |
Ledoux/ShareYourSystem | Pythonlogy/ShareYourSystem/Specials/Predicters/Predicter/draft/__init__.py | 2 | 6026 | # -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
import types
BaseModuleStr="ShareYourSystem.Standards.Controllers.Systemer"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
SYS.addDo('Predicter','Predict','Predicting','Predicted')
#</DefineAugmentation>
#<ImportSpecificModules>
import scipy.stats
import numpy as np
from matplotlib import pyplot
#</ImportSpecificModules>
#<DefineLocals>
def getNullFloatsArray(_FloatsArray, _RtolFloat=1e-5):
u, s, v = np.linalg.svd(_FloatsArray)
RankInt = (s > _RtolFloat*s[0]).sum()
return v[RankInt:].T.copy()
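# Doctest-style sketch (not exercised by this module; _W and _N are example
# names): the columns of the returned array span the null space of the input,
# so the product with the original matrix is numerically zero.
# >>> _W = np.random.randn(2, 5)
# >>> _N = getNullFloatsArray(_W)
# >>> np.allclose(np.dot(_W, _N), 0.)
# True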
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class PredicterClass(BaseClass):
def default_init(self,
_PredictingUnitsInt=0,
_PredictingSensorsInt=0,
_PredictingConstantTimeFloat=0.01,
_PredictedSensorJacobianFloatsArray=None,
_PredictedDecoderWeigthFloatsArray=None,
_PredictedLateralWeigthFloatsArray=None,
_PredictedLeakLateralWeigthFloatsArray=None,
**_KwargVariablesDict
):
""" """
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
self.set(
'-Layers':{
'|Sensor':{
''
},
'|Decoder':{
''
}
}
)
def do_predict(self):
#/#################/#
        # Sensor care : Prepare the input weight and the null matrix
#
self.PredictedSensorJacobianFloatsArray=-np.diag(
(1./self.PredictingConstantTimeFloat)*np.ones(
self.PredictingSensorsInt
)
)
#debug
'''
self.debug(
[
'We have prepared the sensor jacobian',
('self.',self,['PredictedSensorJacobianFloatsArray'])
]
)
'''
#/#################/#
        # Prepare the decoder weights
#
#Perturbative and exact
#random
self.PredictedDecoderWeigthFloatsArray=self.PredictingDecoderWeigtFloat*getattr(
scipy.stats,
self.PredictingDecoderStatStr
).rvs(
size=(
self.PredictingSensorsInt,
self.PredictingUnitsInt
)
)/(self.PredictingUnitsInt**self.PredictingNormalisationInt)
#find the null space
self.PredictedNullFloatsArray=getNullFloatsArray(
self.PredictedExactDecoderWeigthFloatsArray
)
#debug
'''
PredictedProductArray=np.dot(
self.PredictedExactDecoderWeigthFloatsArray,
self.PredictedNullFloatsArray
)
self.debug(
[
('self.',self,[
'PredictedExactDecoderWeigthFloatsArray',
'PredictingUnitsInt'
]
),
("locals()['",locals(),['PredictedProductArray'],"']")
]
)
'''
#Control
#pinv
self.PredictedControlDecoderWeigthFloatsArray=np.linalg.pinv(
self.PredictedExactDecoderWeigthFloatsArray.T
)
#debug
'''
PredictedPinvFloatsArray=np.dot(
self.PredictedControlDecoderWeigthFloatsArray,
self.PredictedExactDecoderWeigthFloatsArray.T
)
self.debug(
[
'PredictedPinvFloatsArray is ',
str(PredictedPinvFloatsArray)
]
)
'''
#/#################/#
# Build the perturbative random matrices
#
#random
self.PredictedInputRandomFloatsArray=self.PredictingPerturbativeInputWeightFloat*getattr(
scipy.stats,
self.PredictingDecoderRandomStatStr
).rvs(
size=(
np.shape(self.PredictedNullFloatsArray)[1],
self.PredictingSensorsInt
)
)
#dot
self.PredictedPerturbativeInputWeigthFloatsArray=np.dot(
self.PredictedNullFloatsArray,
self.PredictedInputRandomFloatsArray
)
#/#################/#
# Build all the perturbative input
#
#sum
self.PredictedTotalPerturbativeInputWeigthFloatsArray=self.PredictedExactDecoderWeigthFloatsArray.T+self.PredictedPerturbativeInputWeigthFloatsArray
#/#################/#
        # Build the leak weight matrix
#
self.PredictedLeakWeigthFloatsArray=np.diag(np.ones(self.PredictingUnitsInt))
#/#################/#
# Build all the possible lateral connectivities
#
#Exact
#dot
self.PredictedExactLateralWeigthFloatsArray=np.dot(
self.PredictedExactDecoderWeigthFloatsArray.T,
self.PredictedExactDecoderWeigthFloatsArray
)
#add the leaky part to compensate
self.PredictedLeakExactLateralWeigthFloatsArray=self.PredictedExactLateralWeigthFloatsArray-(
1.-self.PredictingCostFloat)*np.diag(
np.ones(self.PredictingUnitsInt)
)
#Perturbative
#random
self.PredictedLateralRandomFloatsArray=self.PredictingPerturbativeLateralWeightFloat*getattr(
scipy.stats,
self.PredictingLateralRandomStatStr
).rvs(
size=(
np.shape(self.PredictedNullFloatsArray)[1],
self.PredictingUnitsInt
)
)
#dot
self.PredictedPerturbativeLateralWeigthFloatsArray=np.dot(
self.PredictedNullFloatsArray,
self.PredictedLateralRandomFloatsArray
)
#sum
self.PredictedTotalPerturbativeLateralWeigthFloatsArray=self.PredictedLeakExactLateralWeigthFloatsArray+self.PredictedPerturbativeLateralWeigthFloatsArray
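        # Sanity property of the construction above: PredictedNullFloatsArray spans
        # the null space of the exact decoder, so the perturbative input term
        # satisfies np.allclose(np.dot(self.PredictedExactDecoderWeigthFloatsArray,
        # self.PredictedPerturbativeInputWeigthFloatsArray), 0.) and therefore
        # leaves the decoded estimate unchanged.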
#</DefineClass>
#</DefinePrint>
PredicterClass.PrintingClassSkipKeyStrsList.extend(
[
'PredictingUnitsInt',
'PredictingSensorsInt',
'PredictingConstantTimeFloat',
'PredictingDecoderWeigtFloat',
'PredictingNormalisationInt',
'PredictingCostFloat',
'PredictingPerturbativeInputWeightFloat',
'PredictingPerturbativeLateralWeightFloat',
'PredictingInputStatStr',
'PredictingDecoderRandomStatStr',
'PredictingLateralRandomStatStr',
'PredictedSensorJacobianFloatsArray',
'PredictedLeakWeigthFloatsArray',
'PredictedControlDecoderWeigthFloatsArray',
'PredictedExactDecoderWeigthFloatsArray',
'PredictedInputRandomFloatsArray',
'PredictedPerturbativeInputWeigthFloatsArray',
'PredictedNullFloatsArray',
'PredictedTotalPerturbativeInputWeigthFloatsArray',
'PredictedExactLateralWeigthFloatsArray',
'PredictedLeakExactLateralWeigthFloatsArray',
'PredictedLateralRandomFloatsArray',
'PredictedPerturbativeLateralWeigthFloatsArray',
'PredictedTotalPerturbativeLateralWeigthFloatsArray',
]
)
#<DefinePrint> | mit |
JPFrancoia/scikit-learn | examples/linear_model/plot_robust_fit.py | 147 | 3050 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation with respect to non-corrupt new data is used to
judge the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
a break point above which it performs worse than OLS.
- The scores of HuberRegressor may not be compared directly to both TheilSen
and RANSAC because it does not attempt to completely filter the outliers
but only to lessen their effect.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn.linear_model import (
LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor)
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)),
('HuberRegressor', HuberRegressor())]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen', 'HuberRegressor': 'black'}
linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--', 'HuberRegressor': '--'}
lw = 3
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling Errors Only', X, y),
('Corrupt X, Small Deviants', X_errors, y),
('Corrupt y, Small Deviants', X, y_errors),
('Corrupt X, Large Deviants', X_errors_large, y),
('Corrupt y, Large Deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'b+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot, color=colors[name], linestyle=linestyle[name],
linewidth=lw, label='%s: error = %.3f' % (name, mse))
legend_title = 'Error of Mean\nAbsolute Deviation\nto Non-corrupt Data'
legend = plt.legend(loc='upper right', frameon=False, title=legend_title,
prop=dict(size='x-small'))
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
paulray/NICERsoft | scripts/master_plotter-deprecated.py | 1 | 15931 | #!/usr/bin/env python
from __future__ import (print_function, division, unicode_literals, absolute_import)
import sys
# Hack to add this to pythonpath
#sys.path.append('/Users/paulr/src/NICERsoft')
import matplotlib.pyplot as plt
import numpy as np
import argparse
from astropy import log
from astropy.table import Table, vstack
import astropy.io.fits as pyfits
import astropy.units as u
from astropy.time import Time
from os import path
from nicer.values import *
from nicer.cartographer import *
from nicer.plotutils import *
from nicer.sci_plots import sci_plots
from nicer.eng_plots import eng_plots
from glob import glob
import sys
from nicer.bkg_plots import *
from nicer.fitsutils import *
from InteractiveLC import *
from nicer.NicerFileSet import *
parser = argparse.ArgumentParser(description = "Plot the NICER data nicely.")
parser.add_argument("infiles", help="Input files", nargs='*', default = None)
parser.add_argument("--obsdir",help = "Find alllllllll the files!", default = None)
parser.add_argument("--object", help="Override object name", default=None)
parser.add_argument("--guessobj", help="Try to guess object from directory name", action="store_true")
parser.add_argument("--useftools", help="Use FTOOLS for filter and merge", action="store_true")
parser.add_argument("--mask",help="Mask these IDS", nargs = '*', type=int, default=None)
parser.add_argument("-s", "--save", help = "Save plots to file", action = "store_true")
parser.add_argument("--sci", help = "Makes some nice science plots", action = "store_true")
parser.add_argument("--eng", help = "Makes some nice engineering plots", action = "store_true")
parser.add_argument("--bkg", help = "Display background diagnostic plots", action = 'store_true')
parser.add_argument("--filtswtrig", help = "Filter SW TRIG events", action = "store_true")
parser.add_argument("--filtovershoot", help = "Filter OVERSHOOT events", action = "store_true")
parser.add_argument("--filtundershoot", help = "Filter UNDERSHOOT events", action = "store_true")
parser.add_argument("--filtratio", help="Filter PHA/PHA_FAST ratio (argument is ratio to cut at)", type=float, default=1.4)
parser.add_argument("--filtall", help = "Filter SWTRIG, UNDERSHOOT and OVERSHOOT events", action = "store_true")
parser.add_argument("--emin", help="Minimum energy (keV) to keep", default=-1.0, type=float)
parser.add_argument("--emax", help="Minimum energy (keV) to keep", default=-1.0, type=float)
parser.add_argument("--tskip", help="Seconds to skip at beginning of data", default=0.0, type=float)
parser.add_argument("--lcbinsize", help="Light curve bin size (s)", default=1.0, type=float)
parser.add_argument("--pi", help="Force use of internal PHA to PI conversion", action='store_true')
parser.add_argument("--basename", help="Basename for output plots", default=None)
parser.add_argument("--lclog", help = "make light curve log axis", action = "store_true")
parser.add_argument("--foldfreq", help="Make pulse profile by folding at a fixed freq (Hz)",default=0.0,type=float)
parser.add_argument("--nyquist", help="Nyquist freq for power spectrum (Hz)",default=100.0,type=float)
parser.add_argument("--map", help= "Creates a map with overshoots and undershoots", action = 'store_true')
parser.add_argument("--orb", help="Path to orbit FITS filed", default = None)
parser.add_argument("--par", help="Path to par file", default = None)
parser.add_argument("--sps", help="Path to SPS HK file (_apid0260.hk)",default=None)
parser.add_argument("--powspec",help = "Display power spectrum (replaces ratio plot)", action = 'store_true')
parser.add_argument("--pslog", help = "make power spectrum log axis", action = "store_true")
parser.add_argument("--writeps", help = "write out power spectrum", action = "store_true")
parser.add_argument("--writebkf",help="Write useful rates for background filtering to FITS file", action='store_true')
parser.add_argument("--applygti",help="Read GTI from provided FITS file", default=None)
parser.add_argument("--extraphkshootrate",help="Compute HK shoot rates from a single MPU", action='store_true')
parser.add_argument("--eventshootrate",help="Gets over/undershoot rates from the events", action='store_true')
parser.add_argument("--interactive", help= "TEST FOR INTERACTIVE LC", action = 'store_true')
parser.add_argument("--readovs", help = "Filters events with overshoot > input number", nargs = '*', type = float, default = None)
parser.add_argument("--gtirows",help="Select GTI rows", nargs = '*', type=int, default=None)
args = parser.parse_args()
#------------------------------Getting the data and concatenating------------------------------
if np.logical_or(args.obsdir is not None, args.infiles is not None):
if args.obsdir is not None:
#Create the data structure
data = NicerFileSet(args)
etable = data.etable
gtitable = data.gtitable
#Some Definitions
mktable = data.mktable
hkmet = data.hkmet
basename = data.basename
else:
#Creating the data table for each separate file
if args.useftools:
etable = filtandmerge(args.infiles,workdir=None)
else:
log.info('Reading files')
tlist = []
for fn in args.infiles:
log.info('Reading file {0}'.format(fn))
tlist.append(Table.read(fn,hdu=1))
log.info('Concatenating files')
if len(tlist) == 1:
etable = tlist[0]
else:
etable = vstack(tlist,metadata_conflicts='silent')
del tlist
# Read the GTIs from the first event FITS file
gtitable = Table.read(args.infiles[0],hdu=2)
log.info('Got the good times from GTI')
gtitable['DURATION'] = gtitable['STOP']- gtitable['START']
# Only keep GTIs longer than 16 seconds
idx = np.where(gtitable['DURATION']>16.0)[0]
gtitable = gtitable[idx]
print(gtitable)
# Change TIME column name to MET to reflect what it really is
etable.columns['TIME'].name = 'MET'
# Update exposure to be sum of GTI durations
etable.meta['EXPOSURE'] = gtitable['DURATION'].sum()
log.info('Got the good times from GTI')
# Sort table by MET
etable.sort('MET')
log.info("Event MET Range : {0} to {1}".format(etable['MET'].min(),etable['MET'].max(), etable['MET'].max()-etable['MET'].min()))
log.info("TSTART {0} TSTOP {1} (Span {2} seconds)".format(etable.meta['TSTART'],etable.meta['TSTOP'], etable.meta['TSTOP']-etable.meta['TSTART'] ))
log.info("DATE Range {0} to {1}".format(etable.meta['DATE-OBS'],etable.meta['DATE-END']))
if args.object is not None:
etable.meta['OBJECT'] = args.object
bn = path.basename(args.infiles[0]).split('_')[0]
log.info('OBS_ID {0}'.format(etable.meta['OBS_ID']))
if etable.meta['OBS_ID'].startswith('000000'):
log.info('Overwriting OBS_ID with {0}'.format(bn))
etable.meta['OBS_ID'] = bn
etable.meta['OBS_ID'] = bn
if args.basename is None:
basename = '{0}'.format(bn)
else:
basename = args.basename
reset_rates = None
else:
log.warning('You have not specified any files, please input the path to the files you want to see. Exiting.')
sys.exit()
#---------------------Options for data filtering / Plotting -------------------
if not args.sci and not args.eng and not args.map and not args.bkg and not args.interactive:
log.warning("No specific plot requested, making all")
args.sci = True
args.eng = True
args.map = True
args.bkg = True
if args.filtall:
args.filtswtrig=True
args.filtovershoot=True
args.filtundershoot=True
args.filtratio=1.4
#--------------------Editing / Filtering the event data Options-----------------
# Hack to trim first chunk of data
if args.tskip > 0.0:
t0 = gtitable['START'][0]
etable = etable[etable['MET']>t0+args.tskip]
# Correct exposure (approximately)
etable.meta['TSTART'] += args.tskip
if gtitable['START'][0]+args.tskip < gtitable['STOP'][0]:
gtitable['START'][0] += args.tskip
else:
log.error('Trying to skip more than first GTI segment! **NOT IMPLEMENTED**')
sys.exit(1)
#filtering out chosen IDS
if args.mask is not None:
log.info('Masking IDS')
for id in args.mask:
etable = etable[np.where(etable['DET_ID'] != id)]
# If there are no PI columns, add them with approximate calibration
if args.pi or not ('PI' in etable.colnames):
log.info('Adding PI')
calfile = path.join(datadir,'gaincal_linear.txt')
pi = calc_pi(etable,calfile)
etable['PI'] = pi
# Set up the light curve bins, so we can have them for building
# light curves of various quantities, like overshoot rate and ratio filtered events
# Hmmm. The lc_elapsed_bins and lc_met_bins are never used, but CUMTIME is
if gtitable is not None:
startmet = gtitable['START'][0]
stopmet = gtitable['STOP'][0]
duration = stopmet-startmet
# Add 1 bin to make sure last bin covers last events
lc_elapsed_bins = np.arange(0.0,duration+args.lcbinsize,args.lcbinsize)
lc_met_bins = startmet+lc_elapsed_bins
cumtimes = [ 0.0 ]
cumtime = lc_elapsed_bins[-1]+args.lcbinsize
for i in range(1,len(gtitable['START'])):
startmet = gtitable['START'][i]
stopmet = gtitable['STOP'][i]
duration = stopmet-startmet
myelapsedbins = np.arange(0.0,duration+args.lcbinsize,args.lcbinsize)
lc_elapsed_bins = np.append(lc_elapsed_bins,cumtime+myelapsedbins)
lc_met_bins = np.append(lc_met_bins,np.arange(startmet,stopmet+args.lcbinsize,args.lcbinsize))
mylcduration = myelapsedbins[-1]+args.lcbinsize
cumtimes.append(cumtime)
cumtime += mylcduration
gtitable['CUMTIME'] = np.array(cumtimes)
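    # CUMTIME is the cumulative elapsed (binned) good time at the start of each
    # GTI row, presumably so that downstream plots can lay the GTI segments out
    # back-to-back on a single elapsed-time axis.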
# Overwrite bad OBJECT name, if requested (early FITS all have OBJECT=Crab)
if args.guessobj and args.obsdir:
# Trim trailing slash, if needed
if args.obsdir[-1] == '/':
args.obsdir = args.obsdir[:-1]
objname = path.basename(args.obsdir)[11:]
log.info('Guessing Object name {0}'.format(objname))
etable.meta['OBJECT'] = objname
etable.meta['OBJECT'] = objname
#Getting over/undershoot rate from event data.
if args.eventshootrate:
eventovershoots = data.eventovershoots
eventundershoots = data.eventundershoots
eventbothshoots = data.eventbothshoots
else:
eventbothshoots = None
eventundershoots = None
eventovershoots = None
if args.obsdir is not None:
hkovershoots = data.hkovershoots
hkundershoots = data.hkundershoots
reset_rates = data.reset_rates
# Write overshoot and undershoot rates to file for filtering
if args.writebkf:
data.writebkffile()
if np.logical_and(args.readovs is not None, args.writebkf == True):
ovsfile = "{0}.ovs".format(basename)
ovstable = Table.read(ovsfile,hdu=1)
print(ovstable)
#---------------------------------------------Filting all the data as necessary!---------------------------------------------------------------
log.info('Filtering...')
filt_str = 'Filter: {0:.2f} < E < {1:.2f} keV'.format(args.emin,args.emax)
if args.emin >= 0:
b4 = etable['PI'] > args.emin/PI_TO_KEV
else:
b4 = np.ones_like(etable['PI'],dtype=np.bool)
if args.emax >= 0:
b4 = np.logical_and(b4, etable['PI']< args.emax/PI_TO_KEV)
if args.filtswtrig:
b1 = etable['EVENT_FLAGS'][:,FLAG_SWTRIG] == False
filt_str += ", not SWTRIG"
else:
b1 = np.ones_like(etable['PI'],dtype=np.bool)
if args.filtundershoot:
b2 = etable['EVENT_FLAGS'][:,FLAG_UNDERSHOOT] == False
filt_str += ", not UNDERSHOOT"
else:
b2 = np.ones_like(etable['PI'],dtype=np.bool)
if args.filtovershoot:
b3 = etable['EVENT_FLAGS'][:,FLAG_OVERSHOOT] == False
filt_str += ", not OVERSHOOT"
else:
b3 = np.ones_like(etable['PI'],dtype=np.bool)
idx = np.where(b1 & b2 & b3 & b4)[0]
del b1, b2, b3, b4
filttable = etable[idx]
filttable.meta['FILT_STR'] = filt_str
etable.meta['FILT_STR'] = filt_str
# Add Time column with astropy Time for ease of use and for PINT TOAs
if args.par is not None:
log.info('Adding time column')
# This should really be done the FITS way using MJDREF etc...
# For now, just using MET0
etime = filttable.columns['MET'] + MET0
filttable['T'] = etime
if args.applygti is not None:
g = Table.read(args.applygti)
log.info('Applying external GTI from {0}'.format(args.applygti))
g['DURATION'] = g['STOP']-g['START']
# Only keep GTIs longer than 16 seconds
g = g[np.where(g['DURATION']>16.0)]
print(g)
etable = apply_gti(etable,g)
# Replacing this GTI does not work. It needs to be ANDed with the existing GTI
etable.meta['EXPOSURE'] = g['DURATION'].sum()
gtitable = g
log.info('Exposure : {0:.2f}'.format(etable.meta['EXPOSURE']))
#If you want to correlate over/undershoot data to time, then data.hkshoottable or data.eventshoottable will get you there.
#------------------------------------------------------PLOTTING HAPPENS BELOW HERE ------------------------------------------------------
# Background plots are diagnostics for background rates and filtering
if args.bkg:
if hkmet is None:
log.error("Can't make background plots without MPU HKP files")
else:
if eventovershoots is not None:
figure4 = bkg_plots(etable, data, gtitable, args, mktable, data.eventshoottable)
else:
figure4 = bkg_plots(etable, data, gtitable, args, mktable, data.hkshoottable)
figure4.set_size_inches(16,12)
if args.save:
log.info('Writing bkg plot {0}'.format(basename))
figure4.savefig('{0}_bkg.png'.format(basename), dpi = 100)
# Engineering plots are reset rates, count rates by detector, and deadtime
if args.eng:
figure1 = eng_plots(etable, args, reset_rates, filttable)
figure1.set_size_inches(16,12)
if args.save:
log.info('Writing eng plot {0}'.format(basename))
if args.filtall:
figure1.savefig('{0}_eng_clean_{1:.1f}-{2:.1f}keV.png'.format(basename,args.emin,args.emax), dpi = 100)
else:
figure1.savefig('{0}_eng.png'.format(basename), dpi = 100)
# Science plot is light curve, spectrum, pulse profile, and PHA ratio plot (or poweer spectrum)
if args.sci:
# Make science plots using filtered events
if len(filttable) == 0:
log.error('No events left in filtered table! Aborting!')
sys.exit(3)
figure2 = sci_plots(filttable, gtitable, args)
figure2.set_size_inches(16,12)
if args.save:
log.info('Writing sci plot {0}'.format(basename))
if args.filtall:
figure2.savefig('{0}_sci_clean_{1:.1f}-{2:.1f}keV.png'.format(basename,args.emin,args.emax), dpi = 100)
else:
figure2.savefig('{0}_sci.png'.format(basename), dpi = 100)
# Map plot is overshoot and undershoot rates on maps
if args.map:
log.info("I'M THE MAP I'M THE MAP I'M THE MAAAAP")
if eventovershoots is not None:
figure3 = cartography(hkmet, eventovershoots, args, eventundershoots,
filttable, mktable, gtitable)
else:
figure3 = cartography(hkmet, hkovershoots, args, hkundershoots,
filttable, mktable, gtitable)
if args.save:
log.info('Writing MAP {0}'.format(basename))
figure3.savefig('{0}_map.png'.format(basename), dpi = 100)
#Interactive light curve for choosing time intervals to edit out of the light curve
if args.interactive:
log.info("Interaction is coming")
figure4 = plot.figure()
ILC = InteractiveLC(etable, args.lclog, gtitable, figure4, basename, binsize=1.0)
ILC.getgoodtimes()
ILC.writegti()
# Show all plots at the end, if not saving
if not args.save:
log.info('Showing plots...')
plt.show()
| mit |
vladsaveliev/bcbio-nextgen | bcbio/rnaseq/pizzly.py | 4 | 5043 | """
run the pizzly fusion caller for RNA-seq
https://github.com/pmelsted/pizzly
http://www.biorxiv.org/content/early/2017/07/20/166322
"""
from __future__ import print_function
import os
from bcbio.log import logger
from bcbio import utils
import bcbio.pipeline.datadict as dd
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction
from bcbio.rnaseq import kallisto, sailfish, gtf
from bcbio.provenance import do
from bcbio.utils import file_exists, safe_makedir
from bcbio.bam import fasta
h5py = utils.LazyImport("h5py")
import numpy as np
import pandas as pd
def get_fragment_length(data):
"""
lifted from
https://github.com/pmelsted/pizzly/scripts/pizzly_get_fragment_length.py
"""
h5 = kallisto.get_kallisto_h5(data)
cutoff = 0.95
with h5py.File(h5) as f:
x = np.asarray(f['aux']['fld'], dtype='float64')
y = np.cumsum(x)/np.sum(x)
fraglen = np.argmax(y > cutoff)
return(fraglen)
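# Toy illustration of the cutoff logic above (made-up numbers, not real kallisto
# output): np.argmax(y > cutoff) returns the first fragment length at which the
# cumulative fraction of the length distribution exceeds the cutoff.
# >>> x = np.array([0., 1., 2., 3., 4.])
# >>> y = np.cumsum(x) / np.sum(x)   # array([0. , 0.1, 0.3, 0.6, 1. ])
# >>> int(np.argmax(y > 0.95))
# 4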
def run_pizzly(data):
samplename = dd.get_sample_name(data)
work_dir = dd.get_work_dir(data)
pizzlydir = os.path.join(work_dir, "pizzly")
gtf = dd.get_transcriptome_gtf(data)
if not gtf:
gtf = dd.get_gtf_file(data)
if dd.get_transcriptome_fasta(data):
gtf_fa = dd.get_transcriptome_fasta(data)
else:
gtf_fa = sailfish.create_combined_fasta(data)
stripped_fa = os.path.splitext(os.path.basename(gtf_fa))[0] + "-noversions.fa"
stripped_fa = os.path.join(pizzlydir, stripped_fa)
gtf_fa = fasta.strip_transcript_versions(gtf_fa, stripped_fa)
fraglength = get_fragment_length(data)
cachefile = os.path.join(pizzlydir, "pizzly.cache")
fusions = kallisto.get_kallisto_fusions(data)
pizzlypath = config_utils.get_program("pizzly", dd.get_config(data))
outdir = pizzly(pizzlypath, gtf, gtf_fa, fraglength, cachefile, pizzlydir,
fusions, samplename, data)
return outdir
def pizzly(pizzly_path, gtf, gtf_fa, fraglength, cachefile, pizzlydir, fusions,
samplename, data):
outdir = os.path.join(pizzlydir, samplename)
out_stem = os.path.join(outdir, samplename)
pizzly_gtf = make_pizzly_gtf(gtf, os.path.join(pizzlydir, "pizzly.gtf"), data)
sentinel = os.path.join(out_stem, "-flat-filtered.tsv")
pizzlycalls = out_stem + ".json"
if not file_exists(pizzlycalls):
with file_transaction(data, outdir) as tx_out_dir:
safe_makedir(tx_out_dir)
tx_out_stem = os.path.join(tx_out_dir, samplename)
with file_transaction(cachefile) as tx_cache_file:
cmd = ("{pizzly_path} -k 31 --gtf {pizzly_gtf} --cache {tx_cache_file} "
"--align-score 2 --insert-size {fraglength} --fasta {gtf_fa} "
"--output {tx_out_stem} {fusions}")
message = ("Running pizzly on %s." % fusions)
do.run(cmd.format(**locals()), message)
flatfile = out_stem + "-flat.tsv"
filteredfile = out_stem + "-flat-filtered.tsv"
flatten_pizzly(pizzlycalls, flatfile, data)
filter_pizzly(flatfile, filteredfile, data)
return outdir
def make_pizzly_gtf(gtf_file, out_file, data):
"""
pizzly needs the GTF to be in gene -> transcript -> exon order for each
gene. it also wants the gene biotype set as the source
"""
if file_exists(out_file):
return out_file
db = gtf.get_gtf_db(gtf_file)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for gene in db.features_of_type("gene"):
children = [x for x in db.children(id=gene)]
for child in children:
if child.attributes.get("gene_biotype", None):
gene_biotype = child.attributes.get("gene_biotype")
gene.attributes['gene_biotype'] = gene_biotype
gene.source = gene_biotype[0]
print(gene, file=out_handle)
for child in children:
child.source = gene_biotype[0]
# gffread produces a version-less FASTA file
child.attributes.pop("transcript_version", None)
print(child, file=out_handle)
return out_file
def flatten_pizzly(in_file, out_file, data):
pizzlyflatten = config_utils.get_program("pizzly_flatten_json.py", data)
if file_exists(out_file):
return out_file
cmd = "{pizzlyflatten} {in_file} > {tx_out_file}"
message = "Flattening {in_file} to {out_file}."
with file_transaction(data, out_file) as tx_out_file:
do.run(cmd.format(**locals()), message.format(**locals()))
return out_file
def filter_pizzly(in_file, out_file, data):
df = pd.read_csv(in_file, header=0, sep="\t")
df = df.query('paircount > 1 and splitcount > 1')
if file_exists(out_file):
return out_file
with file_transaction(out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
return out_file
| mit |
parthea/pydatalab | google/datalab/utils/commands/_utils.py | 1 | 27217 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from builtins import str
from past.builtins import basestring
try:
import IPython
import IPython.core.display
except ImportError:
raise Exception('This module can only be loaded in ipython.')
import json
import pandas
try:
# Pandas profiling is not needed for build/test but will be in the container.
import pandas_profiling
except ImportError:
pass
import sys
import yaml
import google.datalab.data
import google.datalab.bigquery
import google.datalab.storage
import google.datalab.utils
from . import _html
def notebook_environment():
""" Get the IPython user namespace. """
ipy = IPython.get_ipython()
return ipy.user_ns
def get_notebook_item(name):
""" Get an item from the IPython environment. """
env = notebook_environment()
return google.datalab.utils.get_item(env, name)
def render_list(data):
return IPython.core.display.HTML(_html.HtmlBuilder.render_list(data))
def render_dictionary(data, headers=None):
""" Return a dictionary list formatted as a HTML table.
Args:
data: the dictionary list
headers: the keys in the dictionary to use as table columns, in order.
"""
return IPython.core.display.HTML(_html.HtmlBuilder.render_table(data, headers))
def render_text(text, preformatted=False):
""" Return text formatted as a HTML
Args:
text: the text to render
preformatted: whether the text should be rendered as preformatted
"""
return IPython.core.display.HTML(_html.HtmlBuilder.render_text(text, preformatted))
def get_field_list(fields, schema):
""" Convert a field list spec into a real list of field names.
For tables, we return only the top-level non-RECORD fields as Google charts
can't handle nested data.
"""
# If the fields weren't supplied get them from the schema.
if schema:
all_fields = [f['name'] for f in schema._bq_schema if f['type'] != 'RECORD']
if isinstance(fields, list):
if schema:
# validate fields exist
for f in fields:
if f not in all_fields:
raise Exception('Cannot find field %s in given schema' % f)
return fields
if isinstance(fields, basestring) and fields != '*':
if schema:
# validate fields exist
for f in fields.split(','):
if f not in all_fields:
raise Exception('Cannot find field %s in given schema' % f)
return fields.split(',')
if not schema:
return []
return all_fields
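# Behaviour sketch (hypothetical inputs; with no schema, no validation is done):
# >>> get_field_list('a,b', None)
# ['a', 'b']
# >>> get_field_list('*', None)
# []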
def _get_cols(fields, schema):
""" Get column metadata for Google Charts based on field list and schema. """
typemap = {
'STRING': 'string',
'INT64': 'number',
'INTEGER': 'number',
'FLOAT': 'number',
'FLOAT64': 'number',
'BOOL': 'boolean',
'BOOLEAN': 'boolean',
'DATE': 'date',
'TIME': 'timeofday',
'DATETIME': 'datetime',
'TIMESTAMP': 'timestamp'
}
cols = []
for col in fields:
if schema:
f = schema[col]
t = 'string' if f.mode == 'REPEATED' else typemap.get(f.type, 'string')
cols.append({'id': f.name, 'label': f.name, 'type': t})
else:
# This will only happen if we had no rows to infer a schema from, so the type
# is not really important, except that GCharts will choke if we pass such a schema
# to a chart if it is string x string so we default to number.
cols.append({'id': col, 'label': col, 'type': 'number'})
return cols
def _get_data_from_empty_list(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles empty lists. """
fields = get_field_list(fields, schema)
return {'cols': _get_cols(fields, schema), 'rows': []}, 0
def _get_data_from_list_of_dicts(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of dicts. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source)
def _get_data_from_list_of_lists(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of lists. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
cols = [schema.find(name) for name in fields]
rows = [{'c': [{'v': row[i]} for i in cols]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source)
def _get_data_from_dataframe(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles Pandas DataFrames. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
rows = []
if count < 0:
count = len(source.index)
df_slice = source.reset_index(drop=True)[first_row:first_row + count]
for index, data_frame_row in df_slice.iterrows():
row = data_frame_row.to_dict()
for key in list(row.keys()):
val = row[key]
if isinstance(val, pandas.Timestamp):
row[key] = val.to_pydatetime()
rows.append({'c': [{'v': row[c]} if c in row else {} for c in fields]})
cols = _get_cols(fields, schema)
return {'cols': cols, 'rows': rows}, len(source)
def _get_data_from_table(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles BQ Tables. """
if not source.exists():
return _get_data_from_empty_list(source, fields, first_row, count)
if schema is None:
schema = source.schema
fields = get_field_list(fields, schema)
gen = source.range(first_row, count) if count >= 0 else source
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, source.length
def get_data(source, fields='*', env=None, first_row=0, count=-1, schema=None):
""" A utility function to get a subset of data from a Table, Query, Pandas dataframe or List.
Args:
source: the source of the data. Can be a Table, Pandas DataFrame, List of dictionaries or
lists, or a string, in which case it is expected to be the name of a table in BQ.
fields: a list of fields that we want to return as a list of strings, comma-separated string,
or '*' for all.
env: if the data source is a Query module, this is the set of variable overrides for
parameterizing the Query.
    first_row: the index of the first row to return; default 0. Only used if count is non-negative.
    count: the number of rows to return. If negative (the default), return all rows.
schema: the schema of the data. Optional; if supplied this can be used to help do type-coercion.
Returns:
A tuple consisting of a dictionary and a count; the dictionary has two entries: 'cols'
which is a list of column metadata entries for Google Charts, and 'rows' which is a list of
lists of values. The count is the total number of rows in the source (independent of the
first_row/count parameters).
Raises:
Exception if the request could not be fulfilled.
"""
ipy = IPython.get_ipython()
if env is None:
env = {}
env.update(ipy.user_ns)
if isinstance(source, basestring):
source = google.datalab.utils.get_item(ipy.user_ns, source, source)
if isinstance(source, basestring):
source = google.datalab.bigquery.Table(source)
if isinstance(source, list):
if len(source) == 0:
return _get_data_from_empty_list(source, fields, first_row, count, schema)
elif isinstance(source[0], dict):
return _get_data_from_list_of_dicts(source, fields, first_row, count, schema)
elif isinstance(source[0], list):
return _get_data_from_list_of_lists(source, fields, first_row, count, schema)
else:
raise Exception("To get tabular data from a list it must contain dictionaries or lists.")
elif isinstance(source, pandas.DataFrame):
return _get_data_from_dataframe(source, fields, first_row, count, schema)
elif isinstance(source, google.datalab.bigquery.Query):
return _get_data_from_table(source.execute().result(), fields, first_row, count, schema)
elif isinstance(source, google.datalab.bigquery.Table):
return _get_data_from_table(source, fields, first_row, count, schema)
else:
raise Exception("Cannot chart %s; unsupported object type" % source)
def handle_magic_line(line, cell, parser, namespace=None):
""" Helper function for handling magic command lines given a parser with handlers set. """
try:
args, cell = parser.parse(line, cell, namespace)
if args:
return args['func'](args, cell)
except Exception as e:
# e.args[0] is 'exit_0' if --help is provided in line.
# In this case don't write anything to stderr.
if e.args and e.args[0] == 'exit_0':
return
sys.stderr.write('\n' + str(e))
sys.stderr.flush()
def expand_var(v, env):
""" If v is a variable reference (for example: '$myvar'), replace it using the supplied
env dictionary.
Args:
v: the variable to replace if needed.
env: user supplied dictionary.
Raises:
Exception if v is a variable reference but it is not found in env.
"""
if len(v) == 0:
return v
# Using len() and v[0] instead of startswith makes this Unicode-safe.
if v[0] == '$':
v = v[1:]
if len(v) and v[0] != '$':
if v in env:
v = env[v]
else:
raise Exception('Cannot expand variable $%s' % v)
return v
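# Behaviour sketch (hypothetical values):
# >>> expand_var('$name', {'name': 'my_table'})
# 'my_table'
# >>> expand_var('$$name', {})   # a doubled '$' escapes the reference
# '$name'
# >>> expand_var('literal', {})
# 'literal'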
def replace_vars(config, env):
""" Replace variable references in config using the supplied env dictionary.
Args:
config: the config to parse. Can be a tuple, list or dict.
env: user supplied dictionary.
Raises:
Exception if any variable references are not found in env.
"""
if isinstance(config, dict):
for k, v in list(config.items()):
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
elif isinstance(v, basestring):
config[k] = expand_var(v, env)
elif isinstance(config, list):
for i, v in enumerate(config):
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
elif isinstance(v, basestring):
config[i] = expand_var(v, env)
elif isinstance(config, tuple):
# TODO(gram): figure out how to handle these if the tuple elements are scalar
for v in config:
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
def parse_config(config, env, as_dict=True):
""" Parse a config from a magic cell body. This could be JSON or YAML. We turn it into
a Python dictionary then recursively replace any variable references using the supplied
env dictionary.
"""
if config is None:
return None
stripped = config.strip()
if len(stripped) == 0:
config = {}
elif stripped[0] == '{':
config = json.loads(config)
else:
config = yaml.load(config)
if as_dict:
config = dict(config)
# Now we need to walk the config dictionary recursively replacing any '$name' vars.
replace_vars(config, env)
return config
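# Behaviour sketch (hypothetical YAML cell body with a variable reference):
# >>> parse_config('limit: $max_rows', {'max_rows': 100})
# {'limit': 100}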
def parse_config_for_selected_keys(content, env, keys):
""" Parse a config from a magic cell body for selected config keys.
For example, if 'content' is:
config_item1: value1
config_item2: value2
config_item3: value3
and 'keys' are: [config_item1, config_item3]
The results will be a tuple of
1. The parsed config items (dict): {config_item1: value1, config_item3: value3}
2. The remaining content (string): config_item2: value2
Args:
content: the input content. A string. It has to be a yaml or JSON string.
env: user supplied dictionary for replacing vars (for example, $myvalue).
keys: a list of keys to retrieve from content. Note that it only checks top level keys
in the dict.
Returns:
A tuple. First is the parsed config including only selected keys. Second is
the remaining content.
Raises:
Exception if the content is not a valid yaml or JSON string.
"""
config_items = {key: None for key in keys}
if not content:
return config_items, content
stripped = content.strip()
if len(stripped) == 0:
return {}, None
elif stripped[0] == '{':
config = json.loads(content)
else:
config = yaml.load(content)
if not isinstance(config, dict):
raise ValueError('Invalid config.')
for key in keys:
config_items[key] = config.pop(key, None)
replace_vars(config_items, env)
if not config:
return config_items, None
if stripped[0] == '{':
content_out = json.dumps(config, indent=4)
else:
content_out = yaml.dump(config, default_flow_style=False)
return config_items, content_out
def validate_config(config, required_keys, optional_keys=None):
""" Validate a config dictionary to make sure it includes all required keys
and does not include any unexpected keys.
Args:
config: the config to validate.
required_keys: the names of the keys that the config must have.
optional_keys: the names of the keys that the config can have.
Raises:
Exception if the config is not a dict or invalid.
"""
if optional_keys is None:
optional_keys = []
if not isinstance(config, dict):
raise Exception('config is not dict type')
invalid_keys = set(config) - set(required_keys + optional_keys)
if len(invalid_keys) > 0:
raise Exception('Invalid config with unexpected keys '
'"%s"' % ', '.join(e for e in invalid_keys))
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
def validate_config_must_have(config, required_keys):
""" Validate a config dictionary to make sure it has all of the specified keys
Args:
config: the config to validate.
required_keys: the list of possible keys that config must include.
Raises:
Exception if the config does not have any of them.
"""
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
def validate_config_has_one_of(config, one_of_keys):
""" Validate a config dictionary to make sure it has one and only one
key in one_of_keys.
Args:
config: the config to validate.
one_of_keys: the list of possible keys that config can have one and only one.
Raises:
Exception if the config does not have any of them, or multiple of them.
"""
intersection = set(config).intersection(one_of_keys)
if len(intersection) > 1:
raise Exception('Only one of the values in "%s" is needed' % ', '.join(intersection))
if len(intersection) == 0:
raise Exception('One of the values in "%s" is needed' % ', '.join(one_of_keys))
def validate_config_value(value, possible_values):
""" Validate a config value to make sure it is one of the possible values.
Args:
value: the config value to validate.
possible_values: the possible values the value can be
Raises:
Exception if the value is not one of possible values.
"""
if value not in possible_values:
raise Exception('Invalid config value "%s". Possible values are '
'%s' % (value, ', '.join(e for e in possible_values)))
# For chart and table HTML viewers, we use a list of table names and reference
# instead the indices in the HTML, so as not to include things like projectID, etc,
# in the HTML.
_data_sources = []
def get_data_source_index(name):
if name not in _data_sources:
_data_sources.append(name)
return _data_sources.index(name)
def validate_gcs_path(path, require_object):
""" Check whether a given path is a valid GCS path.
Args:
path: the config to check.
require_object: if True, the path has to be an object path but not bucket path.
Raises:
Exception if the path is invalid
"""
bucket, key = google.datalab.storage._bucket.parse_name(path)
if bucket is None:
raise Exception('Invalid GCS path "%s"' % path)
if require_object and key is None:
raise Exception('It appears the GCS path "%s" is a bucket path but not an object path' % path)
def parse_control_options(controls, variable_defaults=None):
""" Parse a set of control options.
Args:
controls: The dictionary of control options.
variable_defaults: If the controls are for a Query with variables, then this is the
default variable values defined in the Query module. The options in the controls
parameter can override these but if a variable has no 'value' property then we
fall back to these.
Returns:
- the HTML for the controls.
- the default values for the controls as a dict.
- the list of DIV IDs of the controls.
"""
controls_html = ''
control_defaults = {}
control_ids = []
div_id = _html.Html.next_id()
if variable_defaults is None:
variable_defaults = {}
for varname, control in list(controls.items()):
label = control.get('label', varname)
control_id = div_id + '__' + varname
control_ids.append(control_id)
value = control.get('value', variable_defaults.get(varname, None))
# The user should usually specify the type but we will default to 'textbox' for strings
# and 'set' for lists.
if isinstance(value, basestring):
type = 'textbox'
elif isinstance(value, list):
type = 'set'
else:
type = None
type = control.get('type', type)
if type == 'picker':
choices = control.get('choices', value)
if not isinstance(choices, list) or len(choices) == 0:
raise Exception('picker control must specify a nonempty set of choices')
if value is None:
value = choices[0]
choices_html = ''
for i, choice in enumerate(choices):
choices_html += "<option value=\"%s\" %s>%s</option>" % \
(choice, ("selected=\"selected\"" if choice == value else ''), choice)
control_html = "{label}<select disabled id=\"{id}\">{choices}</select>" \
.format(label=label, id=control_id, choices=choices_html)
elif type == 'set': # Multi-picker; implemented as checkboxes.
# TODO(gram): consider using "name" property of the control to group checkboxes. That
# way we can save the code of constructing and parsing control Ids with sequential
# numbers in it. Multiple checkboxes can share the same name.
choices = control.get('choices', value)
if not isinstance(choices, list) or len(choices) == 0:
raise Exception('set control must specify a nonempty set of choices')
if value is None:
value = choices
choices_html = ''
control_ids[-1] = '%s:%d' % (control_id, len(choices)) # replace ID to include count.
for i, choice in enumerate(choices):
checked = choice in value
choice_id = '%s:%d' % (control_id, i)
# TODO(gram): we may want a 'Submit/Refresh button as we may not want to rerun
# query on each checkbox change.
choices_html += """
<div>
<label>
<input type="checkbox" id="{id}" value="{choice}" {checked} disabled>
{choice}
</label>
</div>
""".format(id=choice_id, choice=choice, checked="checked" if checked else '')
control_html = "{label}<div>{choices}</div>".format(label=label, choices=choices_html)
elif type == 'checkbox':
control_html = """
<label>
<input type="checkbox" id="{id}" {checked} disabled>
{label}
</label>
""".format(label=label, id=control_id, checked="checked" if value else '')
elif type == 'slider':
min_ = control.get('min', None)
max_ = control.get('max', None)
if min_ is None or max_ is None:
raise Exception('slider control must specify a min and max value')
if max_ <= min_:
raise Exception('slider control must specify a min value less than max value')
step = control.get('step', 1 if isinstance(min_, int) and isinstance(max_, int)
else (float(max_ - min_) / 10.0))
if value is None:
value = min_
control_html = """
{label}
<input type="text" class="gchart-slider_value" id="{id}_value" value="{value}" disabled/>
<input type="range" class="gchart-slider" id="{id}" min="{min}" max="{max}" step="{step}"
value="{value}" disabled/>
""".format(label=label, id=control_id, value=value, min=min_, max=max_, step=step)
elif type == 'textbox':
if value is None:
value = ''
control_html = "{label}<input type=\"text\" value=\"{value}\" id=\"{id}\" disabled/>" \
.format(label=label, value=value, id=control_id)
else:
raise Exception(
'Unknown control type %s (expected picker, slider, checkbox, textbox or set)' % type)
control_defaults[varname] = value
controls_html += "<div class=\"gchart-control\">{control}</div>\n" \
.format(control=control_html)
controls_html = "<div class=\"gchart-controls\">{controls}</div>".format(controls=controls_html)
return controls_html, control_defaults, control_ids
def chart_html(driver_name, chart_type, source, chart_options=None, fields='*', refresh_interval=0,
refresh_data=None, control_defaults=None, control_ids=None, schema=None):
""" Return HTML for a chart.
Args:
driver_name: the name of the chart driver. Currently we support 'plotly' or 'gcharts'.
chart_type: string specifying type of chart.
source: the data source for the chart. Can be actual data (e.g. list) or the name of
a data source (e.g. the name of a query module).
chart_options: a dictionary of options for the chart. Can contain a 'controls' entry
specifying controls. Other entries are passed as JSON to Google Charts.
fields: the fields to chart. Can be '*' for all fields (only sensible if the columns are
ordered; e.g. a Query or list of lists, but not a list of dictionaries); otherwise a
string containing a comma-separated list of field names.
refresh_interval: a time in seconds after which the chart data will be refreshed. 0 if the
chart should not be refreshed (i.e. the data is static).
refresh_data: if the source is a list or other raw data, this is a YAML string containing
metadata needed to support calls to refresh (get_chart_data).
control_defaults: the default variable values for controls that are shared across charts
including this one.
control_ids: the DIV IDs for controls that are shared across charts including this one.
schema: an optional schema for the data; if not supplied one will be inferred.
Returns:
A string containing the HTML for the chart.
"""
div_id = _html.Html.next_id()
controls_html = ''
if control_defaults is None:
control_defaults = {}
if control_ids is None:
control_ids = []
if chart_options is not None and 'variables' in chart_options:
controls = chart_options['variables']
del chart_options['variables'] # Just to make sure GCharts doesn't see them.
controls_html, defaults, ids = parse_control_options(controls)
# We augment what we are passed so that in principle we can have controls that are
# shared by charts as well as controls that are specific to a chart.
control_defaults.update(defaults)
control_ids.extend(ids),
_HTML_TEMPLATE = """
<div class="bqgc-container">
{controls}
<div class="bqgc {extra_class}" id="{id}">
</div>
</div>
<script>
require.config({{
paths: {{
d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',
plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'
}},
map: {{
'*': {{
datalab: 'nbextensions/gcpdatalab'
}}
}},
shim: {{
plotly: {{
deps: ['d3', 'jquery'],
exports: 'plotly'
}}
}}
}});
require(['datalab/charting',
'datalab/element!{id}',
'base/js/events',
'datalab/style!/nbextensions/gcpdatalab/charting.css'
],
function(charts, dom, events) {{
charts.render(
'{driver}',
dom,
events,
'{chart_type}',
{control_ids},
{data},
{options},
{refresh_data},
{refresh_interval},
{total_rows});
}}
);
</script>
"""
count = 25 if chart_type == 'paged_table' else -1
data, total_count = get_data(source, fields, control_defaults, 0, count, schema)
if refresh_data is None:
if isinstance(source, basestring):
source_index = get_data_source_index(source)
refresh_data = {'source_index': source_index, 'name': source_index}
else:
refresh_data = {'name': 'raw data'}
refresh_data['fields'] = fields
# TODO(gram): check if we need to augment env with user_ns
return _HTML_TEMPLATE \
.format(driver=driver_name,
controls=controls_html,
id=div_id,
chart_type=chart_type,
extra_class=" bqgc-controlled" if len(controls_html) else '',
data=json.dumps(data, cls=google.datalab.utils.JSONEncoder),
options=json.dumps(chart_options, cls=google.datalab.utils.JSONEncoder),
refresh_data=json.dumps(refresh_data, cls=google.datalab.utils.JSONEncoder),
refresh_interval=refresh_interval,
control_ids=str(control_ids),
total_rows=total_count)
def profile_df(df):
""" Generate a profile of data in a dataframe.
Args:
df: the Pandas dataframe.
"""
# The bootstrap CSS messes up the Datalab display so we tweak it to not have an effect.
# TODO(gram): strip it out rather than this kludge.
return IPython.core.display.HTML(
pandas_profiling.ProfileReport(df).html.replace('bootstrap', 'nonexistent'))
| apache-2.0 |
SanPen/GridCal | src/research/grid_reduction/minimal_graph_reduction_example.py | 1 | 4370 | # from networkx import DiGraph, all_simple_paths, draw
from matplotlib import pyplot as plt
# Python program to print all paths from a source to destination.
from collections import defaultdict
# This class represents a directed graph
# using adjacency list representation
class Graph:
def __init__(self, number_of_nodes):
# default dictionary to store graph
self.graph = dict()
# function to add an edge to graph
self.number_of_nodes = number_of_nodes
# initialize adjacency graph
for i in range(number_of_nodes):
self.graph[i] = list()
def add_edge(self, u, v):
"""
Add directed edge between u and v
:param u: node
:param v: node
"""
self.graph[u].append(v)
# self.graph[v].append(u)
def all_paths_util(self, u, d, visited, path, paths):
"""
        A recursive helper that collects all simple paths from 'u' to 'd'.
        visited[] keeps track of vertices on the current path; path[] stores
        the actual vertices of the path being built, and each completed path
        is appended to paths[].
:param u:
:param d:
:param visited:
:param path:
:param paths:
:return:
"""
# Mark the current node as visited and store in path
visited[u] = True
path.append(u)
# If current vertex is same as destination, then print
# current path[]
if u == d:
            paths.append(list(path))  # copy: 'path' is mutated as the recursion unwinds
else:
# If current vertex is not destination
# Recur for all the vertices adjacent to this vertex
for i in self.graph[u]:
if visited[i] is False:
self.all_paths_util(i, d, visited, path, paths)
# Remove current vertex from path[] and mark it as unvisited
path.pop()
visited[u] = False
    # Returns all simple paths from 's' to 'd'
def all_simple_paths(self, s, d):
# Mark all the vertices as not visited
visited = [False] * self.number_of_nodes
# Create an array to store paths
paths = list()
path = list()
# Call the recursive helper function to print all paths
self.all_paths_util(s, d, visited, path, paths)
return paths
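    # Usage sketch: for a 3-node graph with directed edges 0->1, 1->2 and 0->2
    # there are exactly two simple paths from 0 to 2 ([0, 1, 2] and [0, 2]).
    # >>> g = Graph(3)
    # >>> g.add_edge(0, 1); g.add_edge(1, 2); g.add_edge(0, 2)
    # >>> len(g.all_simple_paths(0, 2))
    # 2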
def merge_nodes(self, node_to_delete, node_to_keep):
"""
Merge the information about two nodes
:param node_to_delete:
:param node_to_keep:
:return:
"""
# self.graph[node_to_keep] += self.graph[node_to_delete]
lst = self.graph[node_to_delete]
for x in lst:
if x != node_to_keep:
self.graph[x] += node_to_keep
del self.graph[node_to_delete]
# for key, values in self.graph.items():
# val = values.copy()
# for i in range(len(val)):
# if val[i] == node_to_delete:
# val[i] = node_to_keep
# print('\tnode updated', key, ':', node_to_delete, '->', node_to_keep, ' remove(', node_to_delete, ')')
# self.graph[key] = val
pass
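    # One possible (untested) way to finish the merge, sketched here only as a
    # suggestion: redirect every edge that points at node_to_delete onto
    # node_to_keep, append node_to_delete's remaining neighbours to
    # node_to_keep's adjacency list, then drop node_to_delete, e.g.
    # for node, neighbours in self.graph.items():
    #     self.graph[node] = [node_to_keep if x == node_to_delete else x
    #                         for x in neighbours]
    # self.graph[node_to_keep].extend(x for x in lst if x != node_to_keep)
    # del self.graph[node_to_delete]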
    def remove_edge(self, u, v):
        if v in self.graph[u]:
            self.graph[u].remove(v)
        if u in self.graph[v]:
            self.graph[v].remove(u)
# data preparation -----------------------------------------------
branches = [(1, 0), (2, 1), (3, 2), (3, 12), (6, 5), (5, 4), (4, 3),
(7, 6), (8, 7), (8, 9), (9, 10), (10, 11), (11, 0), (12, 8)]
branches_to_remove_idx = [11, 10, 9, 8, 6, 5, 3, 2, 0]
ft_dict = dict()
graph = Graph(13)
for i, br in enumerate(branches):
graph.add_edge(br[0], br[1])
ft_dict[i] = (br[0], br[1])
# Processing -----------------------------------------------------
for idx in branches_to_remove_idx:
# get the nodes that define the edge to remove
f, t = ft_dict[idx]
# get the number of paths from 'f' to 't'
n_paths = len(list(graph.all_simple_paths(f, t)))
if n_paths == 1:
# remove branch and merge the nodes 'f' and 't'
#
# This is wat I have no clue how to do
#
print('Merge nodes', f, t)
graph.merge_nodes(f, t)
pass
else:
# remove the branch and that's it
print('Simple removal of', f, t)
graph.remove_edge(f, t)
# -----------------------------------------------------------------
| gpl-3.0 |
Some1Nebo/ufcpy | verifier.py | 1 | 6597 | from datetime import datetime, timedelta
import numpy as np
from sklearn import linear_model, preprocessing
from storage import init_db
from storage.models.fight import Fight
from storage.models.fighter import Fighter
cutoff_date = datetime(year=1970, month=1, day=1).date()
class MLPredictor:
def __init__(self, featurize):
self.clf = linear_model.LassoCV()
self.featurize = featurize
self.scaler = None
def learn(self, learning_set):
raw_target = [f.outcome for f in learning_set]
target = np.array(raw_target)
raw_data = [self.featurize(f) for f in learning_set]
data = np.array(raw_data)
self.scaler = preprocessing.StandardScaler().fit(data)
# self.scaler = preprocessing.Normalizer().fit(data)
if self.scaler:
data = self.scaler.transform(data)
self.clf.fit(data, target)
def predict(self, fight):
featurized = np.array([self.featurize(fight)])
if self.scaler:
featurized = self.scaler.transform(featurized)
return self.clf.predict(featurized)[0]
def featurize(fight):
return featurize_fighter(fight.fighter1, fight.event) + featurize_fighter(fight.fighter2, fight.event)
def featurize_fighter(fighter, event):
previous_fights = [f for f in fighter.fights if f.event.date < event.date]
previous_fights.sort(key=lambda f: f.event.date)
wins = [f for f in previous_fights if fighter_win(fighter, f)]
losses = [f for f in previous_fights if not fighter_win(fighter, f)]
win_ratio_feature = 0.5
if len(previous_fights) != 0:
win_ratio_feature = float(len(wins)) / len(previous_fights)
age = (event.date - fighter.birthday).days / 365.0
avg_win_duration = 0
avg_loss_duration = 0
if len(wins) != 0:
avg_win_duration = np.mean(map(fight_duration, wins))
if len(losses) != 0:
avg_loss_duration = np.mean(map(fight_duration, losses))
return [age,
fighter.height,
fighter.reach,
win_ratio_feature,
winning_streak(fighter, previous_fights),
avg_win_duration,
avg_loss_duration
] + specialization_vector(fighter)
def winning_streak(fighter, previous_fights):
    streak = 0
    for f in reversed(previous_fights):
        if fighter_win(fighter, f):
streak += 1
else:
break
return streak
def fight_duration(fight):
full_rounds = fight.round - 1
t = timedelta(minutes=fight.time.minute, seconds=fight.time.second)
return full_rounds + t.total_seconds() / 60.0
def fighter_win(fighter, f):
return f.fighter1.ref == fighter.ref and f.outcome == 1 or f.fighter2.ref == fighter.ref and f.outcome == -1
def specialization_vector(fighter):
result = {
'bjj': 0,
'boxing': 0,
'cardio': 0,
'chin': 0,
'striker': 0,
'wrestler': 0
}
spec = fighter.specialization or ''
spec = spec.lower()
def check(category, words):
for w in words:
if w in spec:
result[category] = 1
break
check('wrestler', ['wrestl', 'takedown', 'slam', 'throw'])
check('bjj', ['bjj', 'jiu', 'jits', 'grappl', 'ground', 'submission'])
check('striker', ['ko ', 'power', 'strik', 'kick', 'knee', 'elbow', 'muay', 'thai'])
check('cardio', ['cardio', 'condition', 'athlet'])
check('boxing', ['hands', 'box', 'ko ', 'punch'])
check('chin', ['heart', 'chin', 'resilience'])
return [v for k, v in sorted(result.items(), key=lambda (name, value): name)]
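# Worked example (specialization string invented for illustration): a fighter
# described as "BJJ black belt with solid wrestling" matches 'bjj' and
# 'wrestl', so the name-sorted vector [bjj, boxing, cardio, chin, striker,
# wrestler] comes out as [1, 0, 0, 0, 0, 1].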
class EventProxy:
def __init__(self, date):
self.date = date
class FightProxy:
def __init__(self, fighter1, fighter2, event):
self.fighter1 = fighter1
self.fighter2 = fighter2
self.event = event
@staticmethod
def reversed(fight):
result = FightProxy(fight.fighter2, fight.fighter1, fight.event)
result.outcome = -fight.outcome
return result
def cross_validate(predictor, fights):
learning_set, validation_set = split(fights, 0.7)
learning_set += map(FightProxy.reversed, learning_set)
predictor.learn(learning_set)
correct = 0
predicted = 0
for fight in validation_set:
outcome = predictor.predict(fight)
if abs(outcome) > 0.2:
predicted_outcome = -1 if outcome < 0 else 1
predicted += 1
if predicted_outcome == fight.outcome:
correct += 1
print(fight.fighter1.ref, fight.fighter2.ref, fight.outcome, outcome)
print(featurize(fight))
return len(validation_set), correct, predicted, correct / float(predicted + 1e-10)
def split(collection, ratio):
mid = int(len(collection) * ratio)
return collection[:mid], collection[mid:]
def validate_fight(f):
return (f.fighter1.reach and
f.fighter2.reach and
f.event and
f.fighter1.birthday > cutoff_date and
f.fighter2.birthday > cutoff_date)
def find_fighter(fighters, name):
first_name, last_name = name.lower().split()
return filter(lambda f: first_name in f.ref.lower() and last_name in f.ref.lower(), fighters)[0]
def predict_event(predictor, fights, fighters):
event_date_str = raw_input("Enter event date (DD/MM/YYYY): ")
event_date = datetime.strptime(event_date_str, "%d/%m/%Y").date()
print(event_date)
predictor.learn(fights)
while True:
name1 = raw_input("Enter fighter 1 name: ")
fighter1 = find_fighter(fighters, name1)
print(fighter1.ref)
name2 = raw_input("Enter fighter 2 name: ")
fighter2 = find_fighter(fighters, name2)
print(fighter2.ref)
fight = FightProxy(fighter1, fighter2, EventProxy(event_date))
print(featurize(fight))
print(predictor.predict(fight))
if __name__ == "__main__":
mysql_connection_string = "mysql+pymysql://{username}:{password}@{host}/{dbname}".format(
username='tempuser',
password='temppassword',
host='localhost',
dbname='ufcdb')
memory_connection_string = 'sqlite:///:memory:'
Session = init_db(mysql_connection_string, create=True)
session = Session()
all_fights = session.query(Fight).all()
fights = filter(validate_fight, all_fights)
fighters = session.query(Fighter).all()
predictor = MLPredictor(featurize)
# print(cross_validate(predictor, fights))
predict_event(predictor, fights, fighters)
| apache-2.0 |
habi/GlobalDiagnostiX | aptina/NoiseVsExposure.py | 1 | 6444 | """
Script to visualize the "Noise vs. Exposure" data from DevWare
According to section 2.2.4.7 of the DevWareX help (http://is.gd/mt7FyF) we get
signal levels and different kinds of noise measurements by using the "Sensor
Control" > "Diagnostics" > "Noise vs. Exposure" tool.
The analysis report gives
- Signal
- RMS Dyn (temporal noise), Avg Dyn (temporal noise)
- FPN (fixed pattern noise), Col FPN (columnwise FPN), Row FPN (rowwise FPN)
- Col Dyn (columnwise temporal noise) and Row Dyn (rowwise temporal noise).
See the wiki page linked above to see how the values are calculated.
"""
import glob
import os
import numpy
import matplotlib.pyplot as plt
def AskUser(Blurb, Choices):
""" Ask for input. Based on function in MasterThesisIvan.ini """
print(Blurb)
for Counter, Item in enumerate(sorted(Choices)):
print ' * [' + str(Counter) + ']:', Item
Selection = []
while Selection not in range(len(Choices)):
try:
Selection = int(input(' '.join(['Please enter the choice you',
'want [0-' +
str(len(Choices) - 1) +
']:'])))
except SyntaxError:
print 'You actually have to select *something*'
if Selection not in range(len(Choices)):
print 'Try again with a valid choice'
print 'You selected', sorted(Choices)[Selection]
return sorted(Choices)[Selection]
Root = '/afs/psi.ch/project/EssentialMed/Images/NoiseVsExposure'
DataFiles = [os.path.basename(i) for i in
glob.glob(os.path.join(Root, '*.txt'))]
# Which plot do we show?
whichone = DataFiles.index(AskUser('Which file should I show you?', DataFiles))
# If no manual selection, we can do
# for whichone in range(len(DataFiles)):
# in a loop...
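# The parsing below assumes file names following the convention
#   <Sensor>_<Lens>_<FramesPerSample>_<MaxExposure-ms>_<Decades>_<SamplesPerDecade>.txt
# e.g. 'AR0130_Lensation_8_500_3_10.txt' (sensor and lens names here are
# invented; only the underscore-separated layout matters).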
# Tell what we do
Sensor = DataFiles[whichone][:-4].split('_')[0]
Lens = DataFiles[whichone][:-4].split('_')[1]
FramesPerSample = DataFiles[whichone][:-4].split('_')[2]
MaximumExposure = DataFiles[whichone][:-4].split('_')[3]
Decades = DataFiles[whichone][:-4].split('_')[4]
SamplesPerDecade = DataFiles[whichone][:-4].split('_')[5]
print 'We are showing the data from the', Sensor, 'CMOS with the', Lens, \
'lens. The analysis was done with', FramesPerSample, \
'frames per sample,', MaximumExposure, 'ms maximum exposure over', \
Decades, 'decades with', SamplesPerDecade, 'samples per decade.'
print
print 'If the exposure has not been recorded in "log scale" (you will see', \
'it in the plots), the "Decades" correspond to the "minimal exposure"', \
'and the "samples per decade" correspond to the "numbers of samples".'
# Generate figure title, so we can distinguish the output
Title = Sensor, Lens, FramesPerSample, 'Frames per Sample', \
MaximumExposure, 'ms Maximum Exposure', Decades, 'Decades', \
SamplesPerDecade, 'Samples/Decade'
# Load the data from the file
File = os.path.join(Root, DataFiles[whichone])
Data = numpy.loadtxt(File, skiprows=3)
# First line gives the full range. Read it with the snippet based on
# http://stackoverflow.com/a/1904455
with open(File, 'r') as f:
FullRange = int(f.readline().split('=')[1])
# Plot the data
Labels = ['Exposure time [ms]', 'Signal', 'RMS Dyn (temporal noise)',
'Avg Dyn (temporal noise)', 'FPN (fixed pattern noise)',
'columnwise FPN', 'rowwise FPN', 'columnwise temporal noise',
'rowwise temporal noise']
# The title of the plot is split over all the subplots, otherwise it destroys
# the layout due to its long length
plt.figure(' '.join(Title), figsize=(16, 9))
# Signal
ax = plt.subplot(131)
plt.plot(Data[:, 0], Data[:, 1], 'o-', label=Labels[1])
plt.axhline(FullRange, linestyle='--', label='Full range')
plt.xlabel(Labels[0])
plt.ylabel(Labels[1])
plt.title(' '.join(Title[:2]))
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.25,
box.width, box.height * 0.75])
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1))
# We want to fit some of the data, thus make a linspace to fit over
PolyfitRange = numpy.linspace(min(Data[:, 0]) - max(Data[:, 0]),
2 * max(Data[:, 0]), 200)
fit = 9
# Fixed pattern noise
ax = plt.subplot(132)
maxy = 0
for i in range(4, 7):
plt.plot(Data[:, 0], Data[:, i], 'o-', label=Labels[i])
maxy = max(max(Data[:, i]), maxy)
plt.plot(Data[:, 0], (Data[:, 1] / max(Data[:, 1])) * max(Data[:, 4]), '--',
label='"Signal" scaled to max(FPN)')
polynomial = numpy.poly1d(numpy.polyfit(Data[:, 0], Data[:, 4], fit))
plt.plot(PolyfitRange, polynomial(PolyfitRange), '--',
label='Polynomial fit (' + str(fit) + ') of FPN')
plt.xlim([min(Data[:, 0]), max(Data[:, 0])])
plt.ylim([0, maxy * 1.1])
plt.xlabel(Labels[0])
plt.ylabel('FPN')
plt.title(' '.join(Title[2:6]))
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.25,
box.width, box.height * 0.75])
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1))
# Temporal noise
ax = plt.subplot(133)
maxy = 0
for i in [2, 3, 7, 8]:
plt.plot(Data[:, 0], Data[:, i], 'o-', label=Labels[i])
maxy = max(max(Data[:, i]), maxy)
plt.plot(Data[:, 0], Data[:, 1] / max(Data[:, 1]) * max(Data[:, 2]), '--',
label='"Signal" scaled to max(RMS Dyn)')
polynomial = numpy.poly1d(numpy.polyfit(Data[:, 0], Data[:, 2], fit))
plt.plot(PolyfitRange, polynomial(PolyfitRange), '--',
label='Polynomial fit (' + str(fit) + ') of RMS Dyn')
plt.xlim([min(Data[:, 0]), max(Data[:, 0])])
plt.ylim([0, maxy * 1.1])
plt.xlabel(Labels[0])
plt.ylabel('Dyn')
plt.title(' '.join(Title[6:]))
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.25,
box.width, box.height * 0.75])
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1))
plt.savefig(os.path.join(Root, DataFiles[whichone][:-4] + '.png'),
            transparent=True, bbox_inches='tight')
plt.draw()
plt.show()
# ~ # Polynomial fit stuff
# ~ plt.figure()
# ~ x = Data[:, 0]
# ~ y = Data[:, 9]
# ~ xp = numpy.linspace(min(x)-max(x), 2*max(x), 222)
# ~
# ~ plt.plot(x, y, '-x', label='original')
# ~ for i in range(3,10,2):
# ~ polynomial = numpy.poly1d(numpy.polyfit(x, y, i))
# ~ plt.plot(xp, polynomial(xp), '--', label=str(i))
# ~ plt.legend(loc='best')
# ~ plt.xlim([min(x), max(x)])
# ~ plt.ylim([min(y), max(y)])
# ~ plt.draw()
# ~ plt.show()
| unlicense |
NDManh/numbbo | code-postprocessing/bbob_pproc/cococommands.py | 2 | 4837 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module for using COCO from the (i)Python interpreter.
For all operations in the Python interpreter, it will be assumed that
the package has been imported as bb, just like it is done in the first
line of the examples below.
The main data structures used in COCO are :py:class:`DataSet`, which
corresponds to data of one algorithm on one problem, and
:py:class:`DataSetList`, which is for collections of :py:class:`DataSet`
instances. Both classes are implemented in :py:mod:`bbob_pproc.pproc`.
Examples:
* Start by importing :py:mod:`bbob_pproc`::
>>> import bbob_pproc as bb # load bbob_pproc
>>> import os
>>> import urllib
>>> import tarfile
>>> path = os.path.abspath(os.path.dirname(os.path.dirname('__file__')))
>>> os.chdir(path)
* Load a data set, assign to variable :py:data:`ds`::
>>> infoFile = 'data/BIPOP-CMA-ES/bbobexp_f2.info'
>>> if not os.path.exists(infoFile):
... os.chdir(os.path.join(path, 'data'))
... dataurl = 'http://coco.gforge.inria.fr/data-archive/2009/BIPOP-CMA-ES_hansen_noiseless.tgz'
... filename, headers = urllib.urlretrieve(dataurl)
... archivefile = tarfile.open(filename)
... archivefile.extractall()
... os.chdir(path)
>>> ds = bb.load(infoFile)
Data consistent according to test in consistency_check() in pproc.DataSet
* Get some information on a :py:class:`DataSetList` instance::
>>> print ds # doctest:+ELLIPSIS
[DataSet(BIPOP-CMA-ES on f2 2-D), ..., DataSet(BIPOP-CMA-ES on f2 40-D)]
>>> bb.info(ds)
6 data set(s)
Algorithm(s): BIPOP-CMA-ES
1 Function with ID 2
Dimension(s): 2, 3, 5, 10, 20, 40
Max evals: [762, 1537, 2428, 6346, 20678, 75010]
"""
from __future__ import absolute_import
#from bbob_pproc import ppsingle, ppfigdim, dataoutput
# from bbob_pproc.pproc import DataSetList, DataSet
from . import pproc
#__all__ = ['load', 'info', 'pickle', 'systeminfo', 'DataSetList', 'DataSet']
def load(filename):
"""Create a :py:class:`DataSetList` instance from a file or folder.
Input argument filename can be a single :file:`info` file name, a
single pickle filename or a folder name. In the latter case, the
folder is browsed recursively for :file:`info` or :file:`pickle`
files.
"""
return pproc.DataSetList(filename)
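# Minimal usage sketch (the paths follow the doctest in the module docstring
# and may not exist locally):
#   dsl = load('data/BIPOP-CMA-ES/bbobexp_f2.info')   # a single info file
#   dsl = load('data/BIPOP-CMA-ES')                   # or a folder, browsed recursively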
# info on the DataSetList: algId, function, dim
def info(dsList):
"""Display more info on an instance of DatasetList."""
dsList.info()
# TODO: method for pickling data in the current folder!
def pickle(dsList):
"""Pickle a DataSetList."""
dsList.pickle(verbose=True)
# TODO this will create a folder with suffix -pickle from anywhere:
# make sure the output folder is created at the right location
def systeminfo():
"""Display information on the system."""
import sys
print sys.version
import numpy
print 'Numpy %s' % numpy.__version__
import matplotlib
print 'Matplotlib %s' % matplotlib.__version__
import bbob_pproc
print 'bbob_pproc %s' % bbob_pproc.__version__
#def examples():
# """Execute example script from examples.py"""
#
# from bbob_pproc import examples
#def plot(dsList):
# """Generate some plots given a DataSetList instance."""
# # if only a single data set
# if len(dsList) == 1:
# ppsingle.generatefig(dsList)
# ppsingle.beautify()
# plt.show()
# # table?
# else:
# # scaling figure
# ppfigdim.generatefig(dsList, (10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8))
# ppfigdim.beautify()
# if
# all data sets are from the same algorithm
#
# do something to lead a single DataSet instead?
# TODO: make sure each module have at least one method that deals with DataSetList instances.
# TODO: data structure dictAlg?
# TODO: hide modules that are not necessary
# bbob2010 (package)
# bbobies
# bestalg
# toolsstats
# bwsettings
# changeAlgIdAndComment
# dataoutput
# determineFtarget
# determineFtarget3
# findfiles
# genericsettings
# grayscalesettings
# minirun
# minirun_2
# ppfig
# ppfigdim -> ERT vs dim 1 alg: 1 DataSetList ok
# pplogloss -> ERT loss vs ? 1 alg, 1 dim: 1 DataSetList ok
# pprldistr -> Runevals (or?) vs %) 1 alg 1 dim: 1 DataSet... ?
# comp2 (package)
# ppfig2 ->
# pprldistr2 ->
# ppscatter ->
# pptable2 ->
# compall (package)
# ppfigs ->
# ppperfprof ->
# pprldmany ->
# pptables ->
# pproc
# pprocold
# pptable -> ERT, etc... vs target 1 alg
# pptex
# ranksumtest
# readalign
# readindexfiles # obsolete
# run
# run2
# runcomp2
# runcompall
# rungeneric
# rungeneric1
# rungeneric2
# rungenericmany
# runmarc
| bsd-3-clause |
Achuth17/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
corresponds to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
dhruv13J/scikit-learn | sklearn/utils/testing.py | 47 | 23587 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
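# Usage sketch for the two helpers above (the warning-raising function is
# hypothetical, shown inline only for illustration):
#
#   def _deprecated_call():
#       warnings.warn("old API, use new_api() instead", UserWarning)
#
#   assert_warns(UserWarning, _deprecated_call)
#   assert_warns_message(UserWarning, "old API", _deprecated_call)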
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
Name of the estimator
func : callable
        Callable object to raise an error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
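# Usage sketch for the mldata mocking helpers (the dataset name and columns
# are invented for illustration; the column layout mirrors fake_mldata above):
#
#   install_mldata_mock({'some-dataset': {'label': np.array([0, 1, 2]),
#                                         'data': np.ones((3, 4))}})
#   try:
#       ...  # code under test calling datasets.fetch_mldata('some-dataset')
#   finally:
#       uninstall_mldata_mock()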
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
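# Usage sketch (the exact names returned depend on the installed sklearn
# version):
#
#   classifiers = all_estimators(type_filter='classifier')
#   for name, Klass in classifiers:
#       print(name)   # e.g. 'AdaBoostClassifier', 'BernoulliNB', ...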
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
project-asap/IReS-Platform | asap-platform/asap-server/asapLibrary/operators/lr_train_spark/imr_tools.py | 5 | 5668 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tools for imr datasets
@author: Chris Mantas
@contact: [email protected]
@since: Created on 2016-02-12
@todo: custom formats, break up big lines
@license: http://www.apache.org/licenses/LICENSE-2.0 Apache License
"""
from ast import literal_eval
from collections import defaultdict
def create_label_encoder(labels):
"""
Creates a label encoder from a list of labels
:param labels: a list of integers
:return: a LabelEncoder object
"""
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
encoder.fit(labels)
return encoder
def get_features_from_line(line):
"""
Given a text line it returns
a) only the last element of the tuple if the line is a tuple.
That element we assume to be a list of features.
b) the line's elements if the line is not a tuple
:param line:
:return:
"""
from ast import literal_eval
entry = literal_eval(line)
return entry[-1] if isinstance(entry, tuple) else entry
def parse_line(line):
"""
Parses a string line to a tuple
:param line:
:return:
"""
from ast import literal_eval
try:
entry = literal_eval(line)
if not isinstance(entry, tuple):
raise Exception("Input parsed, but is not a tuple")
except:
raise Exception("Could not evaluate (parse) input into an object")
return entry
def tuple_to_labeled_point(entry, category, l_encoder=None):
"""
    Creates a label point from a text line that is formatted as a tuple
:param entry: a tuple of format (3, 2, 1, [3,4,4 ..]), where the first
entries in the tuple are labels, and the last entry is
a list of features
:param category: which one of the labels in the tuple to keep for the
labeled point (0 to 2 for imr dataset)
:param l_encoder: the label encoder to encode the label (if any)
:return: a LabeledPoint
"""
from pyspark.mllib.classification import LabeledPoint
label = entry[category]
if l_encoder:
label = l_encoder.transform(label)
features = entry[-1]
return LabeledPoint(label, features) # return a new labelPoint
def classify_line(features, model, l_encoder=None):
"""
Classifies the features based on the given model.
If a label encoder is specified, it reverses the encoding of the label
:param features: a vector of features
:param model: a Classification Model
:param l_encoder: a LabelEncoder
:return: a tuple of: label, [feat1, feat2 ... featN]
"""
encoded_prediction = model.predict(features)
prediction = l_encoder.inverse_transform(encoded_prediction) \
if l_encoder else encoded_prediction
return prediction, features
def label_encoders_from_json_file(labels_json_file, category=None):
"""
Loads a mapping of categories->available_labels from a json file.
If category is specified it returns the LabelEncoder for this category.
If not, it returns a dict of category->LabelEncoder
:param labels_json_file:
:param category:
:return:
"""
from json import load
from sklearn.preprocessing import LabelEncoder
with open(labels_json_file) as infile:
all_labels = load(infile)
label_dict = dict(map(
lambda (k, v): (int(k), LabelEncoder().fit(v)),
all_labels.iteritems()
))
return label_dict[category] if category else label_dict
def labels_from_csv_file(csv_file, label_range):
"""
Parses a csv dataset and keeps a set of all the labels in 'label_range'.
Preserves the order in which it sees labels - does not contain duplicates.
:param csv_file:
:param label_range:
:return:
"""
labels = defaultdict(list)
label_sets = defaultdict(set)
with open(csv_file) as infile:
for line in infile:
line_tokens = line.split(';')
for i in range(label_range[0], label_range[1]+1):
label = int(line_tokens[i])
if label not in label_sets[i]:
label_sets[i].add(label)
labels[i].append(label)
# convert to regular dict of lists
return dict(labels.iteritems())
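# Worked example (values invented): with label_range = (1, 3), a csv line like
#   "42;12;7;3;0.5;0.1"
# contributes label 12 to column 1, 7 to column 2 and 3 to column 3, and each
# per-column list preserves the order in which labels are first seen.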
# ======================= MAIN ========================= #
if __name__ == "__main__":
from argparse import ArgumentParser
from json import dump
cli_parser = ArgumentParser(description='tools for imr datasets')
cli_parser.add_argument("operation",
help="the operation to run: 'train' or 'classify'")
cli_parser.add_argument("input",
help="the input dataset (formatted as a csv file"
"separated with ';' character")
cli_parser.add_argument("output", help="the output file")
cli_parser.add_argument("-rs", '--range-start', type=int, default=1,
help="the start of the range of labels")
cli_parser.add_argument("-re", '--range-end', type=int, default=3,
help="the end of the range of labels (inclusive)")
args = cli_parser.parse_args()
if args.operation == "storelabels":
from collections import defaultdict
# get a dict of labels from a csv dataset
labels_dict = labels_from_csv_file(args.input,
(args.range_start, args.range_end))
# dump it to the output file
with open(args.output, 'w+') as outfile:
dump(labels_dict, outfile)
else:
print("I do not know operation:", args.operation)
| apache-2.0 |
pombredanne/chest | chest/tests/test_core.py | 6 | 10427 | from chest.core import Chest, nbytes, key_to_filename
import os
import re
import json
import shutil
import pickle
from contextlib import contextmanager
import numpy as np
from chest.utils import raises, raise_KeyError
import time
import hashlib
@contextmanager
def tmp_chest(*args, **kwargs):
c = Chest(*args, **kwargs)
fn = c.path
try:
yield c
finally:
if os.path.exists(fn):
with c.lock:
c.drop()
try:
del c
except:
pass
def my_key_to_fname(key):
fname = str(hashlib.md5(str(key).encode()).hexdigest())
return fname
def test_basic():
with tmp_chest() as c:
c[1] = 'one'
c['two'] = 2
assert c[1] == 'one'
assert c['two'] == 2
assert c.path
assert len(c) == 2
assert set(c) == set([1, 'two'])
def test_paths():
with tmp_chest() as c:
assert os.path.exists(c.path)
c[1] = 'one'
c.move_to_disk(1)
assert os.path.exists(c.key_to_filename(1))
with open(c.key_to_filename(1), mode='rb') as f:
assert pickle.load(f) == 'one'
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
return c.all()
return c
def test_limited_storage():
x = np.ones(1000, dtype='i4')
y = np.ones(1000, dtype='i4')
with tmp_chest(available_memory=5000) as c:
c['x'] = x
c['y'] = y
assert c.memory_usage < c.available_memory
assert 'x' in c
assert 'y' in c
assert len(c.inmem) == 1
assert 'x' not in c.inmem
assert 'y' in c.inmem
assert eq(c['x'], x)
assert eq(c['y'], y)
def test_limited_shrink_called_normally():
x = np.ones(1000, dtype='i4')
y = 2 * np.ones(1000, dtype='i4')
with tmp_chest(available_memory=0) as c:
c['x'] = x
c['y'] = y
assert not c.inmem
assert eq(c['x'], x)
assert not c.inmem
def test_shrink():
with tmp_chest(available_memory=100) as c:
c['one'] = np.ones(10, dtype='i8') # 80 bytes
assert 'one' in c.inmem
c['two'] = 2 * np.ones(5, dtype='i8') # 40 bytes
assert 'two' in c.inmem
assert 'one' not in c.inmem
def test_drop():
with tmp_chest() as c:
c.drop()
assert not os.path.exists(c.path)
def test_flush():
with tmp_chest() as c:
c[1] = 'one'
c[2] = 'two'
c.flush()
assert os.path.exists(c.key_to_filename(1))
assert os.path.exists(c.key_to_filename(2))
c[1] = 'two'
c[2] = 'one'
c.flush()
assert c[1] == 'two'
assert c[2] == 'one'
def test_keys_values_items():
with tmp_chest() as c:
c[1] = 'one'
c[2] = 'two'
assert set(c.keys()) == set([1, 2])
assert set(c.values()) == set(['one', 'two'])
assert set(c.items()) == set([(1, 'one'), (2, 'two')])
def test_recreate_chest():
with tmp_chest() as c:
c[1] = 'one'
c[2] = 'two'
c.flush()
c2 = Chest(path=c.path)
assert c.items() == c2.items()
def test_delitem():
with tmp_chest() as c:
c[1] = 'one'
c[2] = 'two'
del c[1]
assert 1 not in c
c.flush()
assert 2 in c
assert os.path.exists(c.key_to_filename(2))
del c[2]
assert not os.path.exists(c.key_to_filename(2))
def test_str():
with tmp_chest() as c:
assert c.path in str(c)
def test_get_from_disk():
with tmp_chest() as c:
c[1] = 'one' # 1 is in memory
c.get_from_disk(1) # shouldn't have an effect
assert 1 in c.inmem
def test_errors():
with tmp_chest() as c:
assert raises(KeyError, lambda: c[1])
assert raises(KeyError, lambda: c[(1, 2)])
def test_reset_item_is_consistent():
with tmp_chest() as c:
c[1] = 'one'
c.flush()
c[1] = 'uno'
assert c[1] == 'uno'
fn = c.key_to_filename(1)
assert not os.path.exists(fn) or c.load(open(fn)) == 'uno'
def test_nbytes():
assert isinstance(nbytes('x'), int)
assert nbytes('x') < 100
assert nbytes(np.ones(1000, dtype='i4')) >= 4000
def test_dataframe_nbytes():
try:
import pandas as pd
df = pd.DataFrame({'a': [1]*10000})
assert nbytes(df) > 10000
except ImportError:
pass
def test_del_on_temp_path():
c = Chest()
c[1] = 'one'
c.flush()
fn = c.path
del c
import gc
gc.collect()
assert not os.path.exists(fn)
def test_del_on_normal_path():
path = '_chest_test_path'
if os.path.exists(path):
shutil.rmtree(path)
c = Chest(path=path)
c[1] = 'one'
c.flush()
del c
import gc
gc.collect()
assert os.path.exists(path)
c = Chest(path=path)
c.drop()
def test_basic_json():
with tmp_chest(load=json.load, dump=json.dump, mode='t') as c:
c[1] = [1, 2, 3]
c[2] = 'two'
c.flush()
c2 = Chest(path=c.path, load=json.load, dump=json.dump, mode='t')
assert c2[1] == c[1]
assert c2[2] == c[2]
def test_key_to_filename():
assert key_to_filename('x') == 'x'
assert isinstance(key_to_filename((1, (3, 4))), str)
assert re.match('^\w+$', key_to_filename('1/2'))
def test_key_to_filename_with_tuples():
a, two = os.path.split(key_to_filename(('one', 'two')))
b, three = os.path.split(key_to_filename(('one', 'three')))
assert a == b
def test_context_manager():
with Chest() as c:
c[1] = 1
c.flush()
assert not os.path.exists(c.path)
try:
with Chest() as c:
1 / 0
except Exception as e:
assert isinstance(e, ZeroDivisionError)
def test_threadsafe():
from multiprocessing.pool import ThreadPool
from random import randint
pool = ThreadPool(8)
n = 100
with tmp_chest(available_memory=48) as c:
for i in range(10):
c[i] = i
def getset(_):
c[randint(0, 9)] = c[randint(0, 9)]
pool.map(getset, range(n))
pool.close()
pool.join()
assert set(c.keys()).issubset(range(10))
assert set(c.values()).issubset(range(10))
def test_undumpable_values_stay_in_memory():
class A(object):
def __getstate__(self):
raise TypeError()
with tmp_chest(available_memory=100) as c:
a = A()
fn = 'tmp'
with open(fn, 'w') as f:
assert raises(TypeError, lambda: c.dump(a, f))
os.remove(fn)
c['a'] = a
# Add enough data to try to flush out a
for i in range(20):
c[i] = i
assert 'a' in c.inmem
assert not os.path.exists(c.key_to_filename('a'))
def test_eat():
with tmp_chest() as c1:
with tmp_chest() as c2:
c1['foo'] = 'bar'
c1['bar'] = 'bbq'
c2['bar'] = 'foo'
c2['spam'] = 'eggs'
c1.update(c2)
assert c1['foo'] == 'bar'
assert c1['bar'] == 'foo'
assert c1['spam'] == 'eggs'
def test_update_no_overwrite():
with tmp_chest() as c1:
with tmp_chest() as c2:
c1['foo'] = 'bar'
c1['bar'] = 'bbq'
c2['bar'] = 'foo'
c2['spam', 'spam'] = 'eggs'
c1.update(c2, overwrite=False)
assert c1['foo'] == 'bar'
assert c1['bar'] == 'bbq'
assert c1['spam', 'spam'] == 'eggs'
def test_update():
with tmp_chest() as c1:
with tmp_chest() as c2:
c1['foo'] = 'bar'
c1['bar'] = 'bbq'
c2['bar'] = 'foo'
c2['spam', 'spam'] = 'eggs'
c1.update(c2)
assert c1['foo'] == 'bar'
assert c1['bar'] == 'foo'
assert c1['spam', 'spam'] == 'eggs'
assert c2['bar'] == 'foo'
assert c2['spam', 'spam'] == 'eggs'
def test_del_flushes():
with tmp_chest(path='foo') as c:
assert c._explicitly_given_path
c[1] = 1
fn = c.key_to_filename(1)
c.__del__()
assert os.path.exists(fn)
def test_del_drops():
with tmp_chest() as c:
c[1] = 1
fn = c.key_to_filename(1)
c.flush()
c.__del__()
assert not os.path.exists(fn)
def test_nested_files_with_tuples():
with tmp_chest(path='foo') as c:
c['one'] = 1
c['one', 'two'] = 12
c['one', 'three'] = 13
assert c['one'] == 1
assert c['one', 'two'] == 12
assert c['one', 'three'] == 13
c.flush()
paths = [fn for fn in os.listdir(c.path) if fn != '.keys']
assert len(paths) == 2
assert any(os.path.isdir(os.path.join(c.path, p)) for p in paths)
assert any(not os.path.isdir(os.path.join(c.path, p)) for p in paths)
c['a', 'b', 'c', 'd', 'e'] = 5
c.flush()
assert c['a', 'b', 'c', 'd', 'e'] == 5
def test_store_fnames():
with tmp_chest(key_to_filename=my_key_to_fname) as c1:
c1[('spam', 'eggs')] = 'spam and eggs'
c1.flush()
with tmp_chest() as c2:
c2.update(c1)
c2.flush()
assert c2[('spam', 'eggs')] == 'spam and eggs'
def test_store_fnames_relocatable():
with tmp_chest(path="somewhere", key_to_filename=my_key_to_fname) as c1:
c1[('spam', 'eggs')] = 'spam and eggs'
c1.flush()
os.rename("somewhere", "else")
with tmp_chest(path="else") as c1:
with tmp_chest(path="somewhereelse") as c2:
c2.update(c1)
c2.flush()
assert c2[('spam', 'eggs')] == 'spam and eggs'
def test_memory_usage():
with tmp_chest() as c:
assert c.memory_usage == 0
c[1] = 1
mem1 = c.memory_usage
assert mem1 > 0
c[2] = 2
assert c.memory_usage > mem1
c.flush()
assert c.memory_usage == 0
def test_prefetch():
with tmp_chest(on_miss=raise_KeyError) as c:
c[1] = 1
c[2] = 2
c[3] = 3
assert not raises(KeyError, lambda: c[1])
c.flush()
assert raises(KeyError, lambda: c[1])
c.prefetch(1)
assert not raises(KeyError, lambda: c[1])
c.prefetch([1, 2])
assert not raises(KeyError, lambda: c[2])
| bsd-3-clause |
tcunis/paparazzi | sw/tools/tcp_aircraft_server/phoenix/__init__.py | 86 | 4470 | #Copyright 2014, Antoine Drouin
"""
Phoenix is a Python library for interacting with Paparazzi
"""
import math
"""
Unit conversions
"""
def rad_of_deg(d): return d/180.*math.pi
def deg_of_rad(r): return r*180./math.pi
def rps_of_rpm(r): return r*2.*math.pi/60.
def rpm_of_rps(r): return r/2./math.pi*60.
def m_of_inch(i): return i*0.0254
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color' : 'k', 'fontsize' : 20 }
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
if fig == None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
    if legend is not None:
        ax.legend(legend, loc='best')
    if xlim is not None:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])
"""
Messages
"""
#: dictionary mapping the C type to its length in bytes (e.g char -> 1)
TYPE_TO_LENGTH_MAP = {
"char" : 1,
"uint8" : 1,
"int8" : 1,
"uint16" : 2,
"int16" : 2,
"uint32" : 4,
"int32" : 4,
"float" : 4,
"double" : 8,
}
#: dictionary mapping the C type to correct format string
TYPE_TO_PRINT_MAP = {
float : "%f",
str : "%s",
chr : "%c",
int : "%d"
}
ACID_ALL = 0xFF
ACID_TEST = 0xFE
ACID_GROUNDSTATION = 0xFD
#: dictionary mapping debug types to format characters
DEBUG_MESSAGES = {
"DEBUG_UINT8" : "%d",
"DEBUG_INT32" : "%d",
"DEBUG_FLOAT" : "%#f"
}
"""
Binary logs
See format description in sw/airborne/subsystems/datalink/fms_link.c
"""
import struct
def hex_of_bin(b): return ' '.join( [ "%02X" % ord( x ) for x in b ] )
import pdb
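# Packet layout assumed by the parser below (sizes inferred from the struct
# formats in the code; byte order is the machine's native order):
#   [timestamp: uint32][payload_len: uint16][ messages ... ][2 trailing bytes, skipped]
#   each message: [msg_len: uint8][msg_id: uint8][payload: msg_len bytes]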
def read_binary_log(filename, tick_freq = 2*512.):
f = open(filename, "rb")
d = f.read()
packet_header_len = 6
msg_header_len = 2
def read_packet(d, packet_start):
payload_start = packet_start+packet_header_len
timestamp, payload_len = struct.unpack("IH", d[packet_start:payload_start])
msgs = read_packet_payload(d, payload_start, payload_len)
next_packet = payload_start+payload_len+2
return timestamp, msgs, next_packet
def read_packet_payload(d, s, l):
msgs = []
packet_end = s+l; msg_start = s
while msg_start<packet_end:
payload_start = msg_start+msg_header_len
msg_len, msg_id = struct.unpack("BB", d[msg_start:payload_start])
payload_end = payload_start+msg_len
msg_payload = d[payload_start:payload_end]
msgs.append([msg_id, msg_payload])
#print msg_id, msg_len, hex_of_bin(msg_payload)
msg_start = payload_end
return msgs
packets = []
packet_start=0
while packet_start<len(d):
timestamp, msgs, next_packet = read_packet(d, packet_start)
packets.append([timestamp/tick_freq, msgs])
#print timestamp, msgs
packet_start = next_packet
f.close()
return packets
def extract_from_binary_log(protocol, packets, msg_names, t_min=None, t_max=None):
ret = [{'time':[], 'data':[]} for m in msg_names]
if t_min == None: t_min = packets[0][0]
if t_max == None: t_max = packets[-1][0]
for t, msgs in packets:
if t>= t_min and t<= t_max:
for id, payload in msgs:
m = protocol.get_message_by_id('telemetry', id)
try: i = msg_names.index(m.name)
except: pass
finally: ret[i]['time'].append(t); ret[i]['data'].append(m.unpack_scaled_values(payload))
return ret
| gpl-2.0 |
hdmetor/scikit-learn | examples/svm/plot_svm_nonlinear.py | 61 | 1089 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learn by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/legend_handler.py | 8 | 22693 | """
This module defines default legend handlers.
It is strongly encouraged to have read the :ref:`legend guide
<plotting-guide-legend>` before reading this documentation.
Legend handlers are expected to be callable objects with the following
signature. ::
legend_handler(legend, orig_handle, fontsize, handlebox)
Where *legend* is the legend itself, *orig_handle* is the original
plot, *fontsize* is the fontsize in pixels, and *handlebox* is an
OffsetBox instance. Within the call, you should create relevant
artists (using relevant properties from the *legend* and/or
*orig_handle*) and add them into the handlebox. The artists need to
be scaled according to the fontsize (note that the size is in pixels,
i.e., this is a dpi-scaled value).
This module includes definition of several legend handler classes
derived from the base class (HandlerBase) with the following method.
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
import numpy as np
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
def update_from_first_child(tgt, src):
tgt.update_from(src.get_children()[0])
class HandlerBase(object):
"""
A Base class for default legend handlers.
The derived classes are meant to override the *create_artists* method, which
has the following signature.::
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
The overridden method needs to create artists of the given
transform that fits in the given dimension (xdescent, ydescent,
width, height) that are scaled by fontsize if necessary.
"""
def __init__(self, xpad=0., ypad=0., update_func=None):
self._xpad, self._ypad = xpad, ypad
self._update_prop_func = update_func
def _update_prop(self, legend_handle, orig_handle):
if self._update_prop_func is None:
self._default_update_prop(legend_handle, orig_handle)
else:
self._update_prop_func(legend_handle, orig_handle)
def _default_update_prop(self, legend_handle, orig_handle):
legend_handle.update_from(orig_handle)
def update_prop(self, legend_handle, orig_handle, legend):
self._update_prop(legend_handle, orig_handle)
legend._set_artist_props(legend_handle)
legend_handle.set_clip_box(None)
legend_handle.set_clip_path(None)
def adjust_drawing_area(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
):
xdescent = xdescent - self._xpad * fontsize
ydescent = ydescent - self._ypad * fontsize
width = width - self._xpad * fontsize
height = height - self._ypad * fontsize
return xdescent, ydescent, width, height
def legend_artist(self, legend, orig_handle,
fontsize, handlebox):
"""
Return the artist that this HandlerBase generates for the given
original artist/handle.
Parameters
----------
legend : :class:`matplotlib.legend.Legend` instance
The legend for which these legend artists are being created.
orig_handle : :class:`matplotlib.artist.Artist` or similar
The object for which these legend artists are being created.
fontsize : float or int
The fontsize in pixels. The artists being created should
be scaled according to the given fontsize.
handlebox : :class:`matplotlib.offsetbox.OffsetBox` instance
The box which has been created to hold this legend entry's
artists. Artists created in the `legend_artist` method must
be added to this handlebox inside this method.
"""
xdescent, ydescent, width, height = self.adjust_drawing_area(
legend, orig_handle,
handlebox.xdescent, handlebox.ydescent,
handlebox.width, handlebox.height,
fontsize)
artists = self.create_artists(legend, orig_handle,
xdescent, ydescent, width, height,
fontsize, handlebox.get_transform())
# create_artists will return a list of artists.
for a in artists:
handlebox.add_artist(a)
# we only return the first artist
return artists[0]
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
raise NotImplementedError('Derived must override')
class HandlerNpoints(HandlerBase):
def __init__(self, marker_pad=0.3, numpoints=None, **kw):
HandlerBase.__init__(self, **kw)
self._numpoints = numpoints
self._marker_pad = marker_pad
def get_numpoints(self, legend):
if self._numpoints is None:
return legend.numpoints
else:
return self._numpoints
def get_xdata(self, legend, xdescent, ydescent, width, height, fontsize):
numpoints = self.get_numpoints(legend)
if numpoints > 1:
# we put some pad here to compensate for the size of the
# marker
xdata = np.linspace(-xdescent + self._marker_pad * fontsize,
width - self._marker_pad * fontsize,
numpoints)
xdata_marker = xdata
elif numpoints == 1:
xdata = np.linspace(-xdescent, width, 2)
xdata_marker = [0.5 * width - 0.5 * xdescent]
return xdata, xdata_marker
class HandlerNpointsYoffsets(HandlerNpoints):
def __init__(self, numpoints=None, yoffsets=None, **kw):
HandlerNpoints.__init__(self, numpoints=numpoints, **kw)
self._yoffsets = yoffsets
def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
if self._yoffsets is None:
ydata = height * legend._scatteryoffsets
else:
ydata = height * np.asarray(self._yoffsets)
return ydata
class HandlerLine2D(HandlerNpoints):
"""
Handler for Line2D instances.
"""
def __init__(self, marker_pad=0.3, numpoints=None, **kw):
HandlerNpoints.__init__(self, marker_pad=marker_pad, numpoints=numpoints, **kw)
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self.update_prop(legline, orig_handle, legend)
legline.set_drawstyle('default')
legline.set_marker("")
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
self.update_prop(legline_marker, orig_handle, legend)
legline_marker.set_linestyle('None')
if legend.markerscale != 1:
newsz = legline_marker.get_markersize() * legend.markerscale
legline_marker.set_markersize(newsz)
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
# correspondence.
legline._legmarker = legline_marker
legline.set_transform(trans)
legline_marker.set_transform(trans)
return [legline, legline_marker]
class HandlerPatch(HandlerBase):
"""
Handler for Patch instances.
"""
def __init__(self, patch_func=None, **kw):
"""
The HandlerPatch class optionally takes a function ``patch_func``
whose responsibility is to create the legend key artist. The
``patch_func`` should have the signature::
def patch_func(legend=legend, orig_handle=orig_handle,
xdescent=xdescent, ydescent=ydescent,
width=width, height=height, fontsize=fontsize)
Subsequently the created artist will have its ``update_prop`` method
called and the appropriate transform will be applied.
"""
HandlerBase.__init__(self, **kw)
self._patch_func = patch_func
def _create_patch(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize):
if self._patch_func is None:
p = Rectangle(xy=(-xdescent, -ydescent),
width=width, height=height)
else:
p = self._patch_func(legend=legend, orig_handle=orig_handle,
xdescent=xdescent, ydescent=ydescent,
width=width, height=height, fontsize=fontsize)
return p
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
p = self._create_patch(legend, orig_handle,
xdescent, ydescent, width, height, fontsize)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
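# Hedged usage sketch for HandlerPatch (added for illustration): ``patch_func``
# must accept the keyword arguments passed in ``_create_patch`` above, e.g. to
# draw the legend key as an ellipse instead of a rectangle:
#
#     from matplotlib.patches import Ellipse
#
#     def make_ellipse(legend, orig_handle, xdescent, ydescent,
#                      width, height, fontsize):
#         return Ellipse(xy=(0.5 * width - 0.5 * xdescent,
#                            0.5 * height - 0.5 * ydescent),
#                        width=width + xdescent, height=height + ydescent)
#
#     # ax.legend(handler_map={some_patch: HandlerPatch(patch_func=make_ellipse)})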
class HandlerLineCollection(HandlerLine2D):
"""
Handler for LineCollection instances.
"""
def get_numpoints(self, legend):
if self._numpoints is None:
return legend.scatterpoints
else:
return self._numpoints
def _default_update_prop(self, legend_handle, orig_handle):
lw = orig_handle.get_linewidth()[0]
dashes = orig_handle.get_dashes()[0]
color = orig_handle.get_colors()[0]
legend_handle.set_color(color)
legend_handle.set_linewidth(lw)
if dashes[0] is not None: # dashed line
legend_handle.set_dashes(dashes[1])
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self.update_prop(legline, orig_handle, legend)
legline.set_transform(trans)
return [legline]
class HandlerRegularPolyCollection(HandlerNpointsYoffsets):
"""
Handler for RegularPolyCollections.
"""
def __init__(self, yoffsets=None, sizes=None, **kw):
HandlerNpointsYoffsets.__init__(self, yoffsets=yoffsets, **kw)
self._sizes = sizes
def get_numpoints(self, legend):
if self._numpoints is None:
return legend.scatterpoints
else:
return self._numpoints
def get_sizes(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize):
if self._sizes is None:
handle_sizes = orig_handle.get_sizes()
if not len(handle_sizes):
handle_sizes = [1]
size_max = max(handle_sizes) * legend.markerscale ** 2
size_min = min(handle_sizes) * legend.markerscale ** 2
numpoints = self.get_numpoints(legend)
if numpoints < 4:
sizes = [.5 * (size_max + size_min), size_max,
size_min]
else:
rng = (size_max - size_min)
sizes = rng * np.linspace(0, 1, numpoints) + size_min
else:
sizes = self._sizes
return sizes
def update_prop(self, legend_handle, orig_handle, legend):
self._update_prop(legend_handle, orig_handle)
legend_handle.set_figure(legend.figure)
#legend._set_artist_props(legend_handle)
legend_handle.set_clip_box(None)
legend_handle.set_clip_path(None)
def create_collection(self, orig_handle, sizes, offsets, transOffset):
p = type(orig_handle)(orig_handle.get_numsides(),
rotation=orig_handle.get_rotation(),
sizes=sizes,
offsets=offsets,
transOffset=transOffset,
)
return p
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = self.get_ydata(legend, xdescent, ydescent,
width, height, fontsize)
sizes = self.get_sizes(legend, orig_handle, xdescent, ydescent,
width, height, fontsize)
p = self.create_collection(orig_handle, sizes,
offsets=list(zip(xdata_marker, ydata)),
transOffset=trans)
self.update_prop(p, orig_handle, legend)
p._transOffset = trans
return [p]
class HandlerPathCollection(HandlerRegularPolyCollection):
"""
Handler for PathCollections, which are used by scatter
"""
def create_collection(self, orig_handle, sizes, offsets, transOffset):
p = type(orig_handle)([orig_handle.get_paths()[0]],
sizes=sizes,
offsets=offsets,
transOffset=transOffset,
)
return p
class HandlerCircleCollection(HandlerRegularPolyCollection):
"""
Handler for CircleCollections
"""
def create_collection(self, orig_handle, sizes, offsets, transOffset):
p = type(orig_handle)(sizes,
offsets=offsets,
transOffset=transOffset,
)
return p
class HandlerErrorbar(HandlerLine2D):
"""
Handler for Errorbars
"""
def __init__(self, xerr_size=0.5, yerr_size=None,
marker_pad=0.3, numpoints=None, **kw):
self._xerr_size = xerr_size
self._yerr_size = yerr_size
HandlerLine2D.__init__(self, marker_pad=marker_pad, numpoints=numpoints,
**kw)
def get_err_size(self, legend, xdescent, ydescent, width, height, fontsize):
xerr_size = self._xerr_size * fontsize
if self._yerr_size is None:
yerr_size = xerr_size
else:
yerr_size = self._yerr_size * fontsize
return xerr_size, yerr_size
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
plotlines, caplines, barlinecols = orig_handle
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
xdata_marker = np.asarray(xdata_marker)
ydata_marker = np.asarray(ydata[:len(xdata_marker)])
xerr_size, yerr_size = self.get_err_size(legend, xdescent, ydescent,
width, height, fontsize)
legline_marker = Line2D(xdata_marker, ydata_marker)
# when plotlines are None (only errorbars are drawn), we just
# make legline invisible.
if plotlines is None:
legline.set_visible(False)
legline_marker.set_visible(False)
else:
self.update_prop(legline, plotlines, legend)
legline.set_drawstyle('default')
legline.set_marker('None')
self.update_prop(legline_marker, plotlines, legend)
legline_marker.set_linestyle('None')
if legend.markerscale != 1:
newsz = legline_marker.get_markersize() * legend.markerscale
legline_marker.set_markersize(newsz)
handle_barlinecols = []
handle_caplines = []
if orig_handle.has_xerr:
verts = [ ((x - xerr_size, y), (x + xerr_size, y))
for x, y in zip(xdata_marker, ydata_marker)]
coll = mcoll.LineCollection(verts)
self.update_prop(coll, barlinecols[0], legend)
handle_barlinecols.append(coll)
if caplines:
capline_left = Line2D(xdata_marker - xerr_size, ydata_marker)
capline_right = Line2D(xdata_marker + xerr_size, ydata_marker)
self.update_prop(capline_left, caplines[0], legend)
self.update_prop(capline_right, caplines[0], legend)
capline_left.set_marker("|")
capline_right.set_marker("|")
handle_caplines.append(capline_left)
handle_caplines.append(capline_right)
if orig_handle.has_yerr:
verts = [ ((x, y - yerr_size), (x, y + yerr_size))
for x, y in zip(xdata_marker, ydata_marker)]
coll = mcoll.LineCollection(verts)
self.update_prop(coll, barlinecols[0], legend)
handle_barlinecols.append(coll)
if caplines:
capline_left = Line2D(xdata_marker, ydata_marker - yerr_size)
capline_right = Line2D(xdata_marker, ydata_marker + yerr_size)
self.update_prop(capline_left, caplines[0], legend)
self.update_prop(capline_right, caplines[0], legend)
capline_left.set_marker("_")
capline_right.set_marker("_")
handle_caplines.append(capline_left)
handle_caplines.append(capline_right)
artists = []
artists.extend(handle_barlinecols)
artists.extend(handle_caplines)
artists.append(legline)
artists.append(legline_marker)
for artist in artists:
artist.set_transform(trans)
return artists
class HandlerStem(HandlerNpointsYoffsets):
"""
Handler for stem plots (as produced by pyplot.stem).
"""
def __init__(self, marker_pad=0.3, numpoints=None,
bottom=None, yoffsets=None, **kw):
HandlerNpointsYoffsets.__init__(self, marker_pad=marker_pad,
numpoints=numpoints,
yoffsets=yoffsets,
**kw)
self._bottom = bottom
def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
if self._yoffsets is None:
ydata = height * (0.5 * legend._scatteryoffsets + 0.5)
else:
ydata = height * np.asarray(self._yoffsets)
return ydata
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
markerline, stemlines, baseline = orig_handle
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = self.get_ydata(legend, xdescent, ydescent,
width, height, fontsize)
if self._bottom is None:
bottom = 0.
else:
bottom = self._bottom
leg_markerline = Line2D(xdata_marker, ydata[:len(xdata_marker)])
self.update_prop(leg_markerline, markerline, legend)
leg_stemlines = []
for thisx, thisy in zip(xdata_marker, ydata):
l = Line2D([thisx, thisx], [bottom, thisy])
leg_stemlines.append(l)
for lm, m in zip(leg_stemlines, stemlines):
self.update_prop(lm, m, legend)
leg_baseline = Line2D([np.amin(xdata), np.amax(xdata)],
[bottom, bottom])
self.update_prop(leg_baseline, baseline, legend)
artists = [leg_markerline]
artists.extend(leg_stemlines)
artists.append(leg_baseline)
for artist in artists:
artist.set_transform(trans)
return artists
class HandlerTuple(HandlerBase):
"""
Handler for tuples of artist handles.
"""
def __init__(self, **kwargs):
HandlerBase.__init__(self, **kwargs)
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
handler_map = legend.get_legend_handler_map()
a_list = []
for handle1 in orig_handle:
handler = legend.get_legend_handler(handler_map, handle1)
_a_list = handler.create_artists(legend, handle1,
xdescent, ydescent, width, height,
fontsize,
trans)
a_list.extend(_a_list)
return a_list
class HandlerPolyCollection(HandlerBase):
"""
Handler for PolyCollection used in fill_between and stackplot.
"""
def _update_prop(self, legend_handle, orig_handle):
def first_color(colors):
colors = mcolors.colorConverter.to_rgba_array(colors)
if len(colors):
return colors[0]
else:
return "none"
def get_first(prop_array):
if len(prop_array):
return prop_array[0]
else:
return None
legend_handle.set_edgecolor(first_color(orig_handle.get_edgecolor()))
legend_handle.set_facecolor(first_color(orig_handle.get_facecolor()))
legend_handle.set_fill(orig_handle.get_fill())
legend_handle.set_hatch(orig_handle.get_hatch())
legend_handle.set_linewidth(get_first(orig_handle.get_linewidths()))
legend_handle.set_linestyle(get_first(orig_handle.get_linestyles()))
legend_handle.set_transform(get_first(orig_handle.get_transforms()))
legend_handle.set_figure(orig_handle.get_figure())
legend_handle.set_alpha(orig_handle.get_alpha())
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
p = Rectangle(xy=(-xdescent, -ydescent),
width=width, height=height)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
| mit |
superzerg/TCD1304AP_teensy2pp | read_pixels.py | 1 | 2341 | #!/usr/bin/python3 -i
import serial  # provided by the pyserial package
from time import sleep
import matplotlib.pyplot as plt
import re
import datetime
import numpy
import pickle
class DataExtruder:
def __init__(self,port='/dev/ttyACM0',baudrate=115200):
self.pattern_pixels=re.compile(r'data=(?P<pixels>[\w ]*) \((?P<nerror>\d*) errors')
self.port=port
self.baudrate=baudrate
self.ser = None
self.data={
'pixels':[],
'nerror':[]
}
self.figure=plt.figure(figsize=[20,8])
self.figure.show()
self.figure_axe=self.figure.gca()
def acquire(self,plot=True):
if self.ser is None:
self.ser=serial.Serial(self.port, self.baudrate)
else:
print('serial connection already opened')
print('starting acquisition, press Ctrl+C to stop.')
try:
while True:
data_serial=self.ser.readline().decode('utf-8')
m=self.pattern_pixels.match(data_serial)
if m:
pixels_num=[];
pixels_ascii=m.group('pixels');
i=0
npixel=0
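# Stream format as inferred from the parsing below: each pixel is encoded as
# two hex digits with the value inverted (pixel = 255 - hex value); a pair of
# spaces marks a pixel that could not be read and is stored as -1.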
while i+1<len(pixels_ascii):
if pixels_ascii[i]==' ':
if pixels_ascii[i+1]==' ':
pixels_num.append(-1)
i=i+2
else:
print('ERROR reading pixel')
break
else:
pixel=255-int(pixels_ascii[i:i+2],16)
pixels_num.append(pixel)
i=i+2
npixel=npixel+1
self.data['pixels'].append(pixels_num)
self.data['nerror'].append(int(m.group('nerror')))
if plot:
self.plot_pixels()
sleep(0.05)
except KeyboardInterrupt:
pass
self.ser.close()
self.ser=None
def plot_pixels(self):
plt.cla()
self.figure_axe.set_position([0.05,0.1,0.94,0.8])
if len(self.data['pixels'])==0:
return
last_reading=self.data['pixels'][len(self.data['pixels'])-1]
if len(last_reading)!=3648:
return
x=range(1,3649)
self.plt_pixels,=plt.plot(x,last_reading,'b-')
self.figure_axe.set_ylim([-1,255])
self.figure_axe.set_xlim([1,3648])
self.figure_axe.set_ylabel('pixel value')
self.figure_axe.set_xlabel('pixel')
plt.draw()
if __name__ == '__main__':
test=DataExtruder(port='/dev/ttyACM0',baudrate=115200)
test.acquire()
| gpl-3.0 |
gonuke/chtc_usage_tools | extractUsage.py | 1 | 2664 | #!/usr/bin/env python
import sqlite3
import argparse
import datetime
import chtc_usage_tools as cut
import matplotlib.pyplot as plt
import matplotlib.dates as mpld
import matplotlib as mpl
from numpy import array
mpl.rcParams['axes.color_cycle'] = ['r', 'k', 'c']
parser = argparse.ArgumentParser(description='A tool to extract usage data')
parser.add_argument('--project',help='The name of a project over which to summarize the results',nargs="*",type=lambda s: unicode(s,'utf8'))
parser.add_argument('--pool',help='Limit the data to a single pool',nargs="*")
parser.add_argument('-s','--sum',help="Sum across pools",action='store_true')
parser.add_argument('--span',choices=['day','month','year'],help="Time span across which to sum data",default='month')
parser.add_argument('database',help='The name of a database file')
args=parser.parse_args()
conn = cut.usage_db_connect(args.database)
curs = conn.cursor()
### projects
usage_projects=set(cut.get_db_projects(curs))
if args.project:
usage_projects=set(args.project).intersection(usage_projects)
### pools
usage_pools=cut.get_db_pools(curs)
if args.pool:
usage_pools=set(args.pool).intersection(usage_pools)
usage_pools = list(usage_pools)
date_fmt_list= {'day':"%Y-%m-%d", 'month':"%Y-%m", 'year':"%Y"}
sql_groupby_name = 'month'
if args.span:
sql_groupby_name = args.span
date_fmt = date_fmt_list[sql_groupby_name]
# sum over all users for each pool
sum_usage_pools = map(lambda x: "sum(" + x + ")", usage_pools)
col_query = ','.join(sum_usage_pools)
# sum over all pools
if args.sum:
col_query = '(' + '+'.join(sum_usage_pools) + ')'
usage_pools = ["total"]
project_data = {}
fig = plt.figure()
for project in usage_projects:
sql_cmd = 'select strftime("' + date_fmt + '",enddate) as ' + sql_groupby_name + ',' + col_query + ' from usage where ' + 'userid in (select rowid from users where project=?) group by ' + sql_groupby_name
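# For reference, with --span month and --sum the statement built above looks
# roughly like the following (pool column names depend on the usage table;
# 'poolA'/'poolB' are placeholders):
#   select strftime("%Y-%m",enddate) as month,(sum(poolA)+sum(poolB)) from usage
#   where userid in (select rowid from users where project=?) group by month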
curs.execute(sql_cmd, (project,))
project_data[project] = {'dates':[], 'usage':[]}
rows = curs.fetchall()
for row in rows:
project_data[project]['dates'].append(datetime.datetime.strptime(row[0],date_fmt))
project_data[project]['usage'].append(list(row[1:]))
pool_idx = 0
for temp in zip(*project_data[project]['usage']):
if (max(temp) > 0):
plt.plot_date(mpld.date2num(project_data[project]['dates']),array(temp),'-',xdate=True,label=project + " " + usage_pools[pool_idx])
pool_idx += 1
pool_idx = pool_idx % len(usage_pools)
#print project_data
plt.legend(loc='upper left')
plt.ylabel('cpu-hours per ' + sql_groupby_name)
fig.autofmt_xdate()
plt.show()
| bsd-3-clause |
JohnGriffiths/dipy | dipy/reconst/tests/test_sfm.py | 9 | 5744 | import numpy as np
import numpy.testing as npt
import nibabel as nib
import dipy.reconst.sfm as sfm
import dipy.data as dpd
import dipy.core.gradients as grad
import dipy.sims.voxel as sims
import dipy.core.optimize as opt
import dipy.reconst.cross_validation as xval
def test_design_matrix():
data, gtab = dpd.dsi_voxels()
sphere = dpd.get_sphere()
# Make it with NNLS, so that it gets tested regardless of sklearn
sparse_fascicle_model = sfm.SparseFascicleModel(gtab, sphere,
solver='NNLS')
npt.assert_equal(sparse_fascicle_model.design_matrix.shape,
(np.sum(~gtab.b0s_mask), sphere.vertices.shape[0]))
@npt.dec.skipif(not sfm.has_sklearn)
def test_sfm():
fdata, fbvals, fbvecs = dpd.get_data()
data = nib.load(fdata).get_data()
gtab = grad.gradient_table(fbvals, fbvecs)
for iso in [sfm.ExponentialIsotropicModel, None]:
sfmodel = sfm.SparseFascicleModel(gtab, isotropic=iso)
sffit1 = sfmodel.fit(data[0, 0, 0])
sphere = dpd.get_sphere()
odf1 = sffit1.odf(sphere)
pred1 = sffit1.predict(gtab)
mask = np.ones(data.shape[:-1])
sffit2 = sfmodel.fit(data, mask)
pred2 = sffit2.predict(gtab)
odf2 = sffit2.odf(sphere)
sffit3 = sfmodel.fit(data)
pred3 = sffit3.predict(gtab)
odf3 = sffit3.odf(sphere)
npt.assert_almost_equal(pred3, pred2, decimal=2)
npt.assert_almost_equal(pred3[0, 0, 0], pred1, decimal=2)
npt.assert_almost_equal(odf3[0, 0, 0], odf1, decimal=2)
npt.assert_almost_equal(odf3[0, 0, 0], odf2[0, 0, 0], decimal=2)
# Fit zeros and you will get back zeros
npt.assert_almost_equal(sfmodel.fit(np.zeros(data[0, 0, 0].shape)).beta,
np.zeros(sfmodel.design_matrix[0].shape[-1]))
@npt.dec.skipif(not sfm.has_sklearn)
def test_predict():
SNR = 1000
S0 = 100
_, fbvals, fbvecs = dpd.get_data('small_64D')
bvals = np.load(fbvals)
bvecs = np.load(fbvecs)
gtab = grad.gradient_table(bvals, bvecs)
mevals = np.array(([0.0015, 0.0003, 0.0003],
[0.0015, 0.0003, 0.0003]))
angles = [(0, 0), (60, 0)]
S, sticks = sims.multi_tensor(gtab, mevals, S0, angles=angles,
fractions=[10, 90], snr=SNR)
sfmodel = sfm.SparseFascicleModel(gtab, response=[0.0015, 0.0003, 0.0003])
sffit = sfmodel.fit(S)
pred = sffit.predict()
npt.assert_(xval.coeff_of_determination(pred, S) > 97)
# Should be possible to predict using a different gtab:
new_gtab = grad.gradient_table(bvals[::2], bvecs[::2])
new_pred = sffit.predict(new_gtab)
npt.assert_(xval.coeff_of_determination(new_pred, S[::2]) > 97)
def test_sfm_background():
fdata, fbvals, fbvecs = dpd.get_data()
data = nib.load(fdata).get_data()
gtab = grad.gradient_table(fbvals, fbvecs)
to_fit = data[0,0,0]
to_fit[gtab.b0s_mask] = 0
sfmodel = sfm.SparseFascicleModel(gtab, solver='NNLS')
sffit = sfmodel.fit(to_fit)
npt.assert_equal(sffit.beta, np.zeros_like(sffit.beta))
def test_sfm_stick():
fdata, fbvals, fbvecs = dpd.get_data()
data = nib.load(fdata).get_data()
gtab = grad.gradient_table(fbvals, fbvecs)
sfmodel = sfm.SparseFascicleModel(gtab, solver='NNLS',
response=[0.001, 0, 0])
sffit1 = sfmodel.fit(data[0, 0, 0])
sphere = dpd.get_sphere()
odf1 = sffit1.odf(sphere)
pred1 = sffit1.predict(gtab)
SNR = 1000
S0 = 100
mevals = np.array(([0.001, 0, 0],
[0.001, 0, 0]))
angles = [(0, 0), (60, 0)]
S, sticks = sims.multi_tensor(gtab, mevals, S0, angles=angles,
fractions=[50, 50], snr=SNR)
sfmodel = sfm.SparseFascicleModel(gtab, solver='NNLS',
response=[0.001, 0, 0])
sffit = sfmodel.fit(S)
pred = sffit.predict()
npt.assert_(xval.coeff_of_determination(pred, S) > 96)
def test_sfm_sklearnlinearsolver():
class SillySolver(opt.SKLearnLinearSolver):
def fit(self, X, y):
self.coef_ = np.ones(X.shape[-1])
class EvenSillierSolver(object):
def fit(self, X, y):
self.coef_ = np.ones(X.shape[-1])
fdata, fbvals, fbvecs = dpd.get_data()
gtab = grad.gradient_table(fbvals, fbvecs)
sfmodel = sfm.SparseFascicleModel(gtab, solver=SillySolver())
npt.assert_(isinstance(sfmodel.solver, SillySolver))
npt.assert_raises(ValueError,
sfm.SparseFascicleModel,
gtab,
solver=EvenSillierSolver())
@npt.dec.skipif(not sfm.has_sklearn)
def test_exponential_iso():
fdata, fbvals, fbvecs = dpd.get_data()
data_dti = nib.load(fdata).get_data()
gtab_dti = grad.gradient_table(fbvals, fbvecs)
data_multi, gtab_multi = dpd.dsi_deconv_voxels()
for data, gtab in zip([data_dti, data_multi], [gtab_dti, gtab_multi]):
sfmodel = sfm.SparseFascicleModel(
gtab, isotropic=sfm.ExponentialIsotropicModel)
sffit1 = sfmodel.fit(data[0, 0, 0])
sphere = dpd.get_sphere()
odf1 = sffit1.odf(sphere)
pred1 = sffit1.predict(gtab)
SNR = 1000
S0 = 100
mevals = np.array(([0.0015, 0.0005, 0.0005],
[0.0015, 0.0005, 0.0005]))
angles = [(0, 0), (60, 0)]
S, sticks = sims.multi_tensor(gtab, mevals, S0, angles=angles,
fractions=[50, 50], snr=SNR)
sffit = sfmodel.fit(S)
pred = sffit.predict()
npt.assert_(xval.coeff_of_determination(pred, S) > 96)
| bsd-3-clause |
UNR-AERIAL/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
davidwaroquiers/pymatgen | pymatgen/analysis/eos.py | 5 | 19699 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements various equations of state.
Note: Most of the code was initially adapted from ASE and deltafactor by
@gmatteo but has since undergone major refactoring.
"""
import logging
import warnings
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import numpy as np
from scipy.optimize import leastsq, minimize
from pymatgen.core.units import FloatWithUnit
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt, pretty_plot
__author__ = "Kiran Mathew, gmatteo"
__credits__ = "Cormac Toher"
logger = logging.getLogger(__file__)
class EOSBase(metaclass=ABCMeta):
"""
Abstract class that must be subclassed by all equation of state
implementations.
"""
def __init__(self, volumes, energies):
"""
Args:
volumes (list/numpy.array): volumes in Ang^3
energies (list/numpy.array): energy in eV
"""
self.volumes = np.array(volumes)
self.energies = np.array(energies)
# minimum energy (e0), bulk modulus (b0),
# derivative of bulk modulus wrt pressure(b1), minimum volume(v0)
self._params = None
# the eos function parameters. It is the same as _params except for
# equation of states that uses polynomial fits(deltafactor and
# numerical_eos)
self.eos_params = None
def _initial_guess(self):
"""
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
"""
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.eos_params = [a, b, c]
v0 = -b / (2 * a)
e0 = a * (v0 ** 2) + b * v0 + c
b0 = 2 * a * v0
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = min(self.volumes), max(self.volumes)
if not (vmin < v0 < vmax):
raise EOSError("The minimum volume of a fitted parabola is not in the input volumes.")
return e0, b0, b1, v0
def fit(self):
"""
Do the fitting using least-squares. If you want to use custom
fitting, you must override this method.
"""
# the objective function that will be minimized in the least square
# fitting
self._params = self._initial_guess()
self.eos_params, ierr = leastsq(
lambda pars, x, y: y - self._func(x, pars),
self._params,
args=(self.volumes, self.energies),
)
# e0, b0, b1, v0
self._params = self.eos_params
if ierr not in [1, 2, 3, 4]:
raise EOSError("Optimal parameters not found")
@abstractmethod
def _func(self, volume, params):
"""
The equation of state function. This must be implemented by all classes
that derive from this abstract class.
Args:
volume (float/numpy.array)
params (list/tuple): values for the parameters other than the
volume used by the eos.
"""
pass
def func(self, volume):
"""
The equation of state function with the parameters other than volume set
to the ones obtained from fitting.
Args:
volume (list/numpy.array)
Returns:
numpy.array
"""
return self._func(np.array(volume), self.eos_params)
def __call__(self, volume):
"""
Args:
volume (list/numpy.array): Volume
Returns:
Compute EOS with this volume.
"""
return self.func(volume)
@property
def e0(self):
"""
Returns the min energy.
"""
return self._params[0]
@property
def b0(self):
"""
Returns the bulk modulus.
Note: the unit of the bulk modulus is energy per unit volume (e.g. eV/Ang^3).
"""
return self._params[1]
@property
def b0_GPa(self):
"""
Returns the bulk modulus in GPa.
Note: This assumes that the energy and volumes are in eV and Ang^3
respectively
"""
return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")
@property
def b1(self):
"""
Returns the derivative of bulk modulus wrt pressure(dimensionless)
"""
return self._params[2]
@property
def v0(self):
"""
Returns the minimum or the reference volume in Ang^3.
"""
return self._params[3]
@property
def results(self):
"""
Returns a summary dict.
Returns:
dict
"""
return dict(e0=self.e0, b0=self.b0, b1=self.b1, v0=self.v0)
def plot(self, width=8, height=None, plt=None, dpi=None, **kwargs):
"""
Plot the equation of state.
Args:
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width *
golden ratio.
plt (matplotlib.pyplot): If plt is supplied, changes will be made
to an existing plot. Otherwise, a new plot will be created.
dpi:
kwargs (dict): additional args fed to pyplot.plot.
supported keys: style, color, text, label
Returns:
Matplotlib plot object.
"""
# pylint: disable=E1307
plt = pretty_plot(width=width, height=height, plt=plt, dpi=dpi)
color = kwargs.get("color", "r")
label = kwargs.get("label", "{} fit".format(self.__class__.__name__))
lines = [
"Equation of State: %s" % self.__class__.__name__,
"Minimum energy = %1.2f eV" % self.e0,
"Minimum or reference volume = %1.2f Ang^3" % self.v0,
"Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa" % (self.b0, self.b0_GPa),
"Derivative of bulk modulus wrt pressure = %1.2f" % self.b1,
]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
plt.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
plt.plot(vfit, self.func(vfit), linestyle="dashed", color=color, label=label)
plt.grid(True)
plt.xlabel("Volume $\\AA^3$")
plt.ylabel("Energy (eV)")
plt.legend(loc="best", shadow=True)
# Add text with fit parameters.
plt.text(0.4, 0.5, text, transform=plt.gca().transAxes)
return plt
@add_fig_kwargs
def plot_ax(self, ax=None, fontsize=12, **kwargs):
"""
Plot the equation of state on axis `ax`
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: Legend fontsize.
color (str): plot color.
label (str): Plot label
text (str): Legend text (options)
Returns:
Matplotlib figure object.
"""
# pylint: disable=E1307
ax, fig, plt = get_ax_fig_plt(ax=ax)
color = kwargs.get("color", "r")
label = kwargs.get("label", "{} fit".format(self.__class__.__name__))
lines = [
"Equation of State: %s" % self.__class__.__name__,
"Minimum energy = %1.2f eV" % self.e0,
"Minimum or reference volume = %1.2f Ang^3" % self.v0,
"Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa" % (self.b0, self.b0_GPa),
"Derivative of bulk modulus wrt pressure = %1.2f" % self.b1,
]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
ax.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
ax.plot(vfit, self.func(vfit), linestyle="dashed", color=color, label=label)
ax.grid(True)
ax.set_xlabel("Volume $\\AA^3$")
ax.set_ylabel("Energy (eV)")
ax.legend(loc="best", shadow=True)
# Add text with fit parameters.
ax.text(
0.5,
0.5,
text,
fontsize=fontsize,
horizontalalignment="center",
verticalalignment="center",
transform=ax.transAxes,
)
return fig
class Murnaghan(EOSBase):
"""
Murnaghan EOS.
"""
def _func(self, volume, params):
"""
From PRB 28, 5480 (1983)
"""
e0, b0, b1, v0 = tuple(params)
return e0 + b0 * volume / b1 * (((v0 / volume) ** b1) / (b1 - 1.0) + 1.0) - v0 * b0 / (b1 - 1.0)
class Birch(EOSBase):
"""
Birch EOS.
"""
def _func(self, volume, params):
"""
From Intermetallic compounds: Principles and Practice, Vol. I:
Principles, Chapter 9, pages 195-210, by M. Mehl, B. Klein,
D. Papaconstantopoulos.
Case where n=0.
"""
e0, b0, b1, v0 = tuple(params)
return (
e0
+ 9.0 / 8.0 * b0 * v0 * ((v0 / volume) ** (2.0 / 3.0) - 1.0) ** 2
+ 9.0 / 16.0 * b0 * v0 * (b1 - 4.0) * ((v0 / volume) ** (2.0 / 3.0) - 1.0) ** 3
)
class BirchMurnaghan(EOSBase):
"""
BirchMurnaghan EOS
"""
def _func(self, volume, params):
"""
BirchMurnaghan equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (v0 / volume) ** (1.0 / 3.0)
return e0 + 9.0 * b0 * v0 / 16.0 * (eta ** 2 - 1) ** 2 * (6 + b1 * (eta ** 2 - 1.0) - 4.0 * eta ** 2)
class PourierTarantola(EOSBase):
"""
PourierTarantola EOS
"""
def _func(self, volume, params):
"""
Pourier-Tarantola equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1.0 / 3.0)
squiggle = -3.0 * np.log(eta)
return e0 + b0 * v0 * squiggle ** 2 / 6.0 * (3.0 + squiggle * (b1 - 2))
class Vinet(EOSBase):
"""
Vinet EOS.
"""
def _func(self, volume, params):
"""
Vinet equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1.0 / 3.0)
return e0 + 2.0 * b0 * v0 / (b1 - 1.0) ** 2 * (
2.0 - (5.0 + 3.0 * b1 * (eta - 1.0) - 3.0 * eta) * np.exp(-3.0 * (b1 - 1.0) * (eta - 1.0) / 2.0)
)
class PolynomialEOS(EOSBase):
"""
Derives from EOSBase. Polynomial based equations of states must subclass
this.
"""
def _func(self, volume, params):
return np.poly1d(list(params))(volume)
def fit(self, order):
"""
Do polynomial fitting and set the parameters. Uses numpy polyfit.
Args:
order (int): order of the fit polynomial
"""
self.eos_params = np.polyfit(self.volumes, self.energies, order)
self._set_params()
def _set_params(self):
"""
Use the fit polynomial to compute the parameters e0, b0, b1 and v0
and set the _params attribute.
"""
fit_poly = np.poly1d(self.eos_params)
# the volume at min energy, used as the initial guess for the
# optimization wrt volume.
v_e_min = self.volumes[np.argmin(self.energies)]
# evaluate e0, v0, b0 and b1
min_wrt_v = minimize(fit_poly, v_e_min)
e0, v0 = min_wrt_v.fun, min_wrt_v.x[0]
pderiv2 = np.polyder(fit_poly, 2)
pderiv3 = np.polyder(fit_poly, 3)
b0 = v0 * np.poly1d(pderiv2)(v0)
db0dv = np.poly1d(pderiv2)(v0) + v0 * np.poly1d(pderiv3)(v0)
# db/dp
b1 = -v0 * db0dv / b0
self._params = [e0, b0, b1, v0]
class DeltaFactor(PolynomialEOS):
"""
Fitting a polynomial EOS using delta factor.
"""
def _func(self, volume, params):
x = volume ** (-2.0 / 3.0)
return np.poly1d(list(params))(x)
def fit(self, order=3):
"""
Overridden since this eos works with volume**(-2/3) instead of volume.
"""
x = self.volumes ** (-2.0 / 3.0)
self.eos_params = np.polyfit(x, self.energies, order)
self._set_params()
def _set_params(self):
"""
Overridden to account for the fact that the fit is done with
volume**(-2/3) instead of volume.
"""
deriv0 = np.poly1d(self.eos_params)
deriv1 = np.polyder(deriv0, 1)
deriv2 = np.polyder(deriv1, 1)
deriv3 = np.polyder(deriv2, 1)
for x in np.roots(deriv1):
if x > 0 and deriv2(x) > 0:
v0 = x ** (-3.0 / 2.0)
break
else:
raise EOSError("No minimum could be found")
derivV2 = 4.0 / 9.0 * x ** 5.0 * deriv2(x)
derivV3 = -20.0 / 9.0 * x ** (13.0 / 2.0) * deriv2(x) - 8.0 / 27.0 * x ** (15.0 / 2.0) * deriv3(x)
b0 = derivV2 / x ** (3.0 / 2.0)
b1 = -1 - x ** (-3.0 / 2.0) * derivV3 / derivV2
# e0, b0, b1, v0
self._params = [deriv0(v0 ** (-2.0 / 3.0)), b0, b1, v0]
class NumericalEOS(PolynomialEOS):
"""
A numerical EOS.
"""
def fit(self, min_ndata_factor=3, max_poly_order_factor=5, min_poly_order=2):
"""
Fit the input data to the 'numerical eos', the equation of state employed
in the quasiharmonic Debye model described in the paper:
10.1103/PhysRevB.90.174107.
credits: Cormac Toher
Args:
min_ndata_factor (int): parameter that controls the minimum number
of data points that will be used for fitting.
minimum number of data points =
total data points-2*min_ndata_factor
max_poly_order_factor (int): parameter that limits the max order
of the polynomial used for fitting.
max_poly_order = number of data points used for fitting -
max_poly_order_factor
min_poly_order (int): minimum order of the polynomial to be
considered for fitting.
"""
warnings.simplefilter("ignore", np.RankWarning)
def get_rms(x, y):
return np.sqrt(np.sum((np.array(x) - np.array(y)) ** 2) / len(x))
# list of (energy, volume) tuples
e_v = list(zip(self.energies, self.volumes))
ndata = len(e_v)
# minimum number of data points used for fitting
ndata_min = max(ndata - 2 * min_ndata_factor, min_poly_order + 1)
rms_min = np.inf
# number of data points available for fit in each iteration
ndata_fit = ndata
# store the fit polynomial coefficients and the rms in a dict,
# where the key=(polynomial order, number of data points used for
# fitting)
all_coeffs = {}
# sort by energy
e_v = sorted(e_v, key=lambda x: x[0])
# minimum energy tuple
e_min = e_v[0]
# sort by volume
e_v = sorted(e_v, key=lambda x: x[1])
# index of minimum energy tuple in the volume sorted list
emin_idx = e_v.index(e_min)
# the volume lower than the volume corresponding to minimum energy
v_before = e_v[emin_idx - 1][1]
# the volume higher than the volume corresponding to minimum energy
v_after = e_v[emin_idx + 1][1]
e_v_work = deepcopy(e_v)
# loop over the data points.
while (ndata_fit >= ndata_min) and (e_min in e_v_work):
max_poly_order = ndata_fit - max_poly_order_factor
e = [ei[0] for ei in e_v_work]
v = [ei[1] for ei in e_v_work]
# loop over polynomial order
for i in range(min_poly_order, max_poly_order + 1):
coeffs = np.polyfit(v, e, i)
pder = np.polyder(coeffs)
a = np.poly1d(pder)(v_before)
b = np.poly1d(pder)(v_after)
if a * b < 0:
rms = get_rms(e, np.poly1d(coeffs)(v))
rms_min = min(rms_min, rms * i / ndata_fit)
all_coeffs[(i, ndata_fit)] = [coeffs.tolist(), rms]
# store the fit coefficients small to large,
# i.e a0, a1, .. an
all_coeffs[(i, ndata_fit)][0].reverse()
# remove 1 data point from each end.
e_v_work.pop()
e_v_work.pop(0)
ndata_fit = len(e_v_work)
logger.info("total number of polynomials: {}".format(len(all_coeffs)))
norm = 0.0
fit_poly_order = ndata
# weight average polynomial coefficients.
weighted_avg_coeffs = np.zeros((fit_poly_order,))
# combine all the filtered polynomial candidates to get the final fit.
for k, v in all_coeffs.items():
# weighted rms = rms * polynomial order / rms_min / ndata_fit
weighted_rms = v[1] * k[0] / rms_min / k[1]
weight = np.exp(-(weighted_rms ** 2))
norm += weight
coeffs = np.array(v[0])
# pad the coefficient array with zeros
coeffs = np.lib.pad(coeffs, (0, max(fit_poly_order - len(coeffs), 0)), "constant")
weighted_avg_coeffs += weight * coeffs
# normalization
weighted_avg_coeffs /= norm
weighted_avg_coeffs = weighted_avg_coeffs.tolist()
# large to small(an, an-1, ..., a1, a0) as expected by np.poly1d
weighted_avg_coeffs.reverse()
self.eos_params = weighted_avg_coeffs
self._set_params()
class EOS:
"""
Convenient wrapper. Retained in its original state to ensure backward
compatibility.
Fit equation of state for bulk systems.
The following equations are supported::
murnaghan: PRB 28, 5480 (1983)
birch: Intermetallic compounds: Principles and Practice, Vol I:
Principles. pages 195-210
birch_murnaghan: PRB 70, 224107
pourier_tarantola: PRB 70, 224107
vinet: PRB 70, 224107
deltafactor
numerical_eos: 10.1103/PhysRevB.90.174107.
Usage::
eos = EOS(eos_name='murnaghan')
eos_fit = eos.fit(volumes, energies)
eos_fit.plot()
"""
MODELS = {
"murnaghan": Murnaghan,
"birch": Birch,
"birch_murnaghan": BirchMurnaghan,
"pourier_tarantola": PourierTarantola,
"vinet": Vinet,
"deltafactor": DeltaFactor,
"numerical_eos": NumericalEOS,
}
def __init__(self, eos_name="murnaghan"):
"""
Args:
eos_name (str): Type of EOS to fit.
"""
if eos_name not in self.MODELS:
raise EOSError(
"The equation of state '{}' is not supported. "
"Please choose one from the following list: {}".format(eos_name, list(self.MODELS.keys()))
)
self._eos_name = eos_name
self.model = self.MODELS[eos_name]
def fit(self, volumes, energies):
"""
Fit energies as function of volumes.
Args:
volumes (list/np.array)
energies (list/np.array)
Returns:
EOSBase: EOSBase object
"""
eos_fit = self.model(np.array(volumes), np.array(energies))
eos_fit.fit()
return eos_fit
class EOSError(Exception):
"""
Error class for EOS fitting.
"""
pass
| mit |
hrjn/scikit-learn | sklearn/gaussian_process/gpr.py | 5 | 19178 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
from sklearn.utils.deprecation import deprecated
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations
and reduce potential numerical issues during fitting. If an array is
passed, it must have the same number of entries as the data used for
fitting and is used as datapoint-dependent noise level. Note that this
is equivalent to adding a WhiteKernel with c=alpha. Allowing the noise level
to be specified directly as a parameter is mainly for convenience and
for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
By default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values become zero. This parameter should be set to
True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled by default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to draw random starting points for the
hyperparameter optimization restarts. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
@property
@deprecated("Attribute rng was deprecated in version 0.19 and "
"will be removed in 0.21.")
def rng(self):
return self._rng
@property
@deprecated("Attribute y_train_mean was deprecated in version 0.19 and "
"will be removed in 0.21.")
def y_train_mean(self):
return self._y_train_mean
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self._y_train_mean
else:
self._y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
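        # NOTE: the "Line 2", "Line 3", ... comments below and in predict() appear
        # to refer to the numbered steps of the standard GP regression algorithm in
        # the GPML reference also cited (Eq. 5.9) in log_marginal_likelihood.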
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
self.L_ = cholesky(K, lower=True) # Line 2
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its standard
        deviation (return_std=True) or covariance (return_cov=True) can also be returned.
Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
        if not hasattr(self, "X_train_"):  # Unfitted; predict based on GP prior
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = self.kernel(X)
return y_mean, y_cov
elif return_std:
y_var = self.kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
y_mean = self._y_train_mean + y_mean # undo normal.
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ij,ij->i", np.dot(K_trans, K_inv), K_trans)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state : RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
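        # Per output dimension:
        #   log p(y | X, theta) = -0.5 * y^T alpha - sum(log(diag(L))) - 0.5 * n * log(2 * pi)
        # where alpha = K^{-1} y is obtained above from the Cholesky factor L.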
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
| bsd-3-clause |
nicproulx/mne-python | mne/channels/channels.py | 2 | 39343 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Denis Engemann <[email protected]>
# Andrew Dykstra <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import numpy as np
from scipy import sparse
from ..externals.six import string_types
from ..utils import verbose, logger, warn, copy_function_doc_to_method_doc
from ..io.compensator import get_current_comp
from ..io.constants import FIFF
from ..io.meas_info import anonymize_info
from ..io.pick import (channel_type, pick_info, pick_types,
_check_excludes_includes, _PICK_TYPES_KEYS)
def _get_meg_system(info):
"""Educated guess for the helmet type based on channels."""
system = '306m'
for ch in info['chs']:
if ch['kind'] == FIFF.FIFFV_MEG_CH:
# Only take first 16 bits, as higher bits store CTF grad comp order
coil_type = ch['coil_type'] & 0xFFFF
if coil_type == FIFF.FIFFV_COIL_NM_122:
system = '122m'
break
elif coil_type // 1000 == 3: # All Vectorview coils are 30xx
system = '306m'
break
elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH
for c in info['chs']])
system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
break
elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
system = 'CTF_275'
break
elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
system = 'KIT'
break
elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
system = 'BabySQUID'
break
return system
def _contains_ch_type(info, ch_type):
"""Check whether a certain channel type is in an info object.
Parameters
----------
info : instance of Info
The measurement information.
ch_type : str
the channel type to be checked for
Returns
-------
has_ch_type : bool
Whether the channel type is present or not.
"""
if not isinstance(ch_type, string_types):
raise ValueError('`ch_type` is of class {actual_class}. It must be '
'`str`'.format(actual_class=type(ch_type)))
meg_extras = ['mag', 'grad', 'planar1', 'planar2']
fnirs_extras = ['hbo', 'hbr']
valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS
if key != 'meg'] + meg_extras + fnirs_extras)
if ch_type not in valid_channel_types:
raise ValueError('ch_type must be one of %s, not "%s"'
% (valid_channel_types, ch_type))
if info is None:
raise ValueError('Cannot check for channels of type "%s" because info '
'is None' % (ch_type,))
return ch_type in [channel_type(info, ii) for ii in range(info['nchan'])]
def _get_ch_type(inst, ch_type):
"""Choose a single channel type (usually for plotting).
Usually used in plotting to plot a single datatype, e.g. look for mags,
then grads, then ... to plot.
"""
if ch_type is None:
for type_ in ['mag', 'grad', 'planar1', 'planar2', 'eeg']:
if type_ in inst:
ch_type = type_
break
else:
raise RuntimeError('No plottable channel types found')
return ch_type
@verbose
def equalize_channels(candidates, verbose=None):
"""Equalize channel picks for a collection of MNE-Python objects.
Parameters
----------
candidates : list
list Raw | Epochs | Evoked | AverageTFR
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Notes
-----
This function operates inplace.
"""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..time_frequency import AverageTFR
if not all(isinstance(c, (BaseRaw, BaseEpochs, Evoked, AverageTFR))
for c in candidates):
raise ValueError('candidates must be Raw, Epochs, Evoked, or '
'AverageTFR')
chan_max_idx = np.argmax([c.info['nchan'] for c in candidates])
chan_template = candidates[chan_max_idx].ch_names
    logger.info('Identifying common channels ...')
channels = [set(c.ch_names) for c in candidates]
common_channels = set(chan_template).intersection(*channels)
dropped = list()
for c in candidates:
drop_them = list(set(c.ch_names) - common_channels)
if drop_them:
c.drop_channels(drop_them)
dropped.extend(drop_them)
if dropped:
dropped = list(set(dropped))
logger.info('Dropped the following channels:\n%s' % dropped)
else:
        logger.info('all channels correspond, nothing to do.')
class ContainsMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
def __contains__(self, ch_type):
"""Check channel type membership.
Parameters
----------
ch_type : str
Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.
Returns
-------
in : bool
Whether or not the instance contains the given channel type.
Examples
--------
Channel type membership can be tested as::
>>> 'meg' in inst # doctest: +SKIP
True
>>> 'seeg' in inst # doctest: +SKIP
False
"""
if ch_type == 'meg':
has_ch_type = (_contains_ch_type(self.info, 'mag') or
_contains_ch_type(self.info, 'grad'))
else:
has_ch_type = _contains_ch_type(self.info, ch_type)
return has_ch_type
@property
def compensation_grade(self):
"""The current gradient compensation grade."""
return get_current_comp(self.info)
# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py
_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,
'eeg': FIFF.FIFFV_EEG_CH,
'emg': FIFF.FIFFV_EMG_CH,
'eog': FIFF.FIFFV_EOG_CH,
'exci': FIFF.FIFFV_EXCI_CH,
'ias': FIFF.FIFFV_IAS_CH,
'misc': FIFF.FIFFV_MISC_CH,
'resp': FIFF.FIFFV_RESP_CH,
'seeg': FIFF.FIFFV_SEEG_CH,
'stim': FIFF.FIFFV_STIM_CH,
'syst': FIFF.FIFFV_SYST_CH,
'bio': FIFF.FIFFV_BIO_CH,
'ecog': FIFF.FIFFV_ECOG_CH,
'hbo': FIFF.FIFFV_FNIRS_CH,
'hbr': FIFF.FIFFV_FNIRS_CH}
_human2unit = {'ecg': FIFF.FIFF_UNIT_V,
'eeg': FIFF.FIFF_UNIT_V,
'emg': FIFF.FIFF_UNIT_V,
'eog': FIFF.FIFF_UNIT_V,
'exci': FIFF.FIFF_UNIT_NONE,
'ias': FIFF.FIFF_UNIT_NONE,
'misc': FIFF.FIFF_UNIT_V,
'resp': FIFF.FIFF_UNIT_NONE,
'seeg': FIFF.FIFF_UNIT_V,
'stim': FIFF.FIFF_UNIT_NONE,
'syst': FIFF.FIFF_UNIT_NONE,
'bio': FIFF.FIFF_UNIT_V,
'ecog': FIFF.FIFF_UNIT_V,
'hbo': FIFF.FIFF_UNIT_MOL,
'hbr': FIFF.FIFF_UNIT_MOL}
_unit2human = {FIFF.FIFF_UNIT_V: 'V',
FIFF.FIFF_UNIT_T: 'T',
FIFF.FIFF_UNIT_T_M: 'T/m',
FIFF.FIFF_UNIT_MOL: 'M',
FIFF.FIFF_UNIT_NONE: 'NA'}
def _check_set(ch, projs, ch_type):
"""Ensure type change is compatible with projectors."""
new_kind = _human2fiff[ch_type]
if ch['kind'] != new_kind:
for proj in projs:
if ch['ch_name'] in proj['data']['col_names']:
raise RuntimeError('Cannot change channel type for channel %s '
'in projector "%s"'
% (ch['ch_name'], proj['desc']))
ch['kind'] = new_kind
class SetChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def set_eeg_reference(self, ref_channels=None, verbose=None):
"""Specify which reference to use for EEG data.
By default, MNE-Python will automatically re-reference the EEG signal
to use an average reference (see below). Use this function to
explicitly specify the desired reference for EEG. This can be either an
existing electrode or a new virtual channel. This function will
re-reference the data according to the desired reference and prevent
MNE-Python from automatically adding an average reference.
Some common referencing schemes and the corresponding value for the
``ref_channels`` parameter:
No re-referencing:
If the EEG data is already using the proper reference, set
``ref_channels=[]``. This will prevent MNE-Python from
automatically re-referencing the data to an average reference.
Average reference:
A new virtual reference electrode is created by averaging the
current EEG signal. Make sure that all bad EEG channels are
properly marked and set ``ref_channels=None``.
A single electrode:
Set ``ref_channels`` to the name of the channel that will act as
the new reference.
The mean of multiple electrodes:
A new virtual reference electrode is created by computing the
average of the current EEG signal recorded from two or more
selected channels. Set ``ref_channels`` to a list of channel names,
indicating which channels to use. For example, to apply an average
mastoid reference, when using the 10-20 naming scheme, set
``ref_channels=['M1', 'M2']``.
.. note:: In case of average reference (ref_channels=None), the
reference is added as an SSP projector and it is not applied
automatically. For it to take effect, apply with method
:meth:`apply_proj <mne.io.proj.ProjMixin.apply_proj>`.
                  For a custom reference (ref_channels is not None), this method
operates in place.
Parameters
----------
ref_channels : list of str | None
The names of the channels to use to construct the reference. If
None (default), an average reference will be added as an SSP
projector but not immediately applied to the data. If an empty list
is specified, the data is assumed to already have a proper
reference and MNE will not attempt any re-referencing of the data.
Defaults to an average reference (None).
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with EEG channels re-referenced. For ``ref_channels=None``,
            an average projector will be added instead of directly subtracting
data.
Notes
-----
1. If a reference is requested that is not the average reference, this
function removes any pre-existing average reference projections.
2. During source localization, the EEG signal should have an average
reference.
3. In order to apply a reference other than an average reference, the
data must be preloaded.
4. Re-referencing to an average reference is done with an SSP
projector. This allows applying this reference without preloading
the data. Be aware that on preloaded data, SSP projectors are not
automatically applied. Use the ``apply_proj()`` method to apply
them.
.. versionadded:: 0.13.0
See Also
--------
mne.set_bipolar_reference
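        Examples
        --------
        Illustrative calls on a hypothetical ``raw`` instance (the mastoid
        channel names are assumptions)::
            >>> raw.set_eeg_reference()  # doctest: +SKIP
            >>> raw.set_eeg_reference(ref_channels=['M1', 'M2'])  # doctest: +SKIP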
"""
from ..io.reference import set_eeg_reference
return set_eeg_reference(self, ref_channels, copy=False)[0]
def _get_channel_positions(self, picks=None):
"""Get channel locations from info.
Parameters
----------
picks : array-like of int | None
Indices of channels to include. If None (default), all meg and eeg
channels that are available are returned (bad channels excluded).
Notes
-----
.. versionadded:: 0.9.0
"""
if picks is None:
picks = pick_types(self.info, meg=True, eeg=True)
chs = self.info['chs']
pos = np.array([chs[k]['loc'][:3] for k in picks])
n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
if n_zero > 1: # XXX some systems have origin (0, 0, 0)
raise ValueError('Could not extract channel positions for '
'{} channels'.format(n_zero))
return pos
def _set_channel_positions(self, pos, names):
"""Update channel locations in info.
Parameters
----------
pos : array-like | np.ndarray, shape (n_points, 3)
The channel positions to be set.
names : list of str
The names of the channels to be set.
Notes
-----
.. versionadded:: 0.9.0
"""
if len(pos) != len(names):
raise ValueError('Number of channel positions not equal to '
'the number of names given.')
pos = np.asarray(pos, dtype=np.float)
if pos.shape[-1] != 3 or pos.ndim != 2:
msg = ('Channel positions must have the shape (n_points, 3) '
'not %s.' % (pos.shape,))
raise ValueError(msg)
for name, p in zip(names, pos):
if name in self.ch_names:
idx = self.ch_names.index(name)
self.info['chs'][idx]['loc'][:3] = p
else:
msg = ('%s was not found in the info. Cannot be updated.'
% name)
raise ValueError(msg)
def set_channel_types(self, mapping):
"""Define the sensor type of channels.
Note: The following sensor types are accepted:
ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog,
hbo, hbr
Parameters
----------
mapping : dict
a dictionary mapping a channel to a sensor type (str)
{'EEG061': 'eog'}.
Notes
-----
.. versionadded:: 0.9.0
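        Examples
        --------
        An illustrative call on a hypothetical ``raw`` instance (the channel
        name is an assumption)::
            >>> raw.set_channel_types({'EEG061': 'eog'})  # doctest: +SKIP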
"""
ch_names = self.info['ch_names']
# first check and assemble clean mappings of index and name
unit_changes = dict()
for ch_name, ch_type in mapping.items():
if ch_name not in ch_names:
raise ValueError("This channel name (%s) doesn't exist in "
"info." % ch_name)
c_ind = ch_names.index(ch_name)
if ch_type not in _human2fiff:
raise ValueError('This function cannot change to this '
'channel type: %s. Accepted channel types '
'are %s.'
% (ch_type,
", ".join(sorted(_human2unit.keys()))))
# Set sensor type
_check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)
unit_old = self.info['chs'][c_ind]['unit']
unit_new = _human2unit[ch_type]
if unit_old not in _unit2human:
raise ValueError("Channel '%s' has unknown unit (%s). Please "
"fix the measurement info of your data."
% (ch_name, unit_old))
if unit_old != _human2unit[ch_type]:
this_change = (_unit2human[unit_old], _unit2human[unit_new])
if this_change not in unit_changes:
unit_changes[this_change] = list()
unit_changes[this_change].append(ch_name)
self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]
if ch_type in ['eeg', 'seeg', 'ecog']:
coil_type = FIFF.FIFFV_COIL_EEG
elif ch_type == 'hbo':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBO
elif ch_type == 'hbr':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBR
else:
coil_type = FIFF.FIFFV_COIL_NONE
self.info['chs'][c_ind]['coil_type'] = coil_type
msg = "The unit for channel(s) {0} has changed from {1} to {2}."
for this_change, names in unit_changes.items():
warn(msg.format(", ".join(sorted(names)), *this_change))
def rename_channels(self, mapping):
"""Rename channels.
Parameters
----------
mapping : dict | callable
a dictionary mapping the old channel to a new channel name
e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
that takes and returns a string (new in version 0.10.0).
Notes
-----
.. versionadded:: 0.9.0
"""
rename_channels(self.info, mapping)
@verbose
def set_montage(self, montage, verbose=None):
"""Set EEG sensor configuration and head digitization.
Parameters
----------
montage : instance of Montage or DigMontage
The montage to use.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Notes
-----
Operates in place.
.. versionadded:: 0.9.0
"""
from .montage import _set_montage
_set_montage(self.info, montage)
return self
def plot_sensors(self, kind='topomap', ch_type=None, title=None,
show_names=False, ch_groups=None, to_sphere=True,
axes=None, block=False, show=True):
"""Plot sensor positions.
Parameters
----------
kind : str
Whether to plot the sensors as 3d, topomap or as an interactive
sensor selection dialog. Available options 'topomap', '3d',
'select'. If 'select', a set of channels can be selected
interactively by using lasso selector or clicking while holding
control key. The selected channels are returned along with the
figure instance. Defaults to 'topomap'.
ch_type : None | str
The channel type to plot. Available options 'mag', 'grad', 'eeg',
'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad,
eeg, seeg and ecog channels are plotted. If None (default), then
channels are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to ``'Sensor
positions (%s)' % ch_type``.
show_names : bool
Whether to display all channel names. Defaults to False.
ch_groups : 'position' | array of shape (ch_groups, picks) | None
Channel groups for coloring the sensors. If None (default), default
coloring scheme is used. If 'position', the sensors are divided
into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
array, the channels are divided by picks given in the array.
.. versionadded:: 0.13.0
to_sphere : bool
Whether to project the 3d locations to a sphere. When False, the
            sensor array appears as it would when looking straight down at the
            subject's head from above. Has no effect when kind='3d'. Defaults to True.
.. versionadded:: 0.14.0
axes : instance of Axes | instance of Axes3D | None
Axes to draw the sensors to. If ``kind='3d'``, axes must be an
instance of Axes3D. If None (default), a new axes will be created.
.. versionadded:: 0.13.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False.
.. versionadded:: 0.13.0
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
Figure containing the sensor topography.
selection : list
A list of selected channels. Only returned if ``kind=='select'``.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_trans`.
.. versionadded:: 0.12.0
"""
from ..viz.utils import plot_sensors
return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,
show_names=show_names, ch_groups=ch_groups,
to_sphere=to_sphere, axes=axes, block=block,
show=show)
@copy_function_doc_to_method_doc(anonymize_info)
def anonymize(self):
"""
.. versionadded:: 0.13.0
"""
anonymize_info(self.info)
return self
class UpdateChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs, AverageTFR."""
def pick_types(self, meg=True, eeg=False, stim=False, eog=False,
ecg=False, emg=False, ref_meg='auto', misc=False,
resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=False, dipole=False, gof=False, bio=False, ecog=False,
fnirs=False, include=[], exclude='bads', selection=None):
"""Pick some channels by type and names.
Parameters
----------
meg : bool | str
            If True include all MEG channels. If False include none.
If string it can be 'mag', 'grad', 'planar1' or 'planar2' to select
only magnetometers, all gradiometers, or a specific type of
gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
        ref_meg : bool | str
If True include CTF / 4D reference channels. If 'auto', the
reference channels are only included if compensations are present.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If True include response-trigger channel. For some MEG systems this
is separate from the stim channel.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
Flux excitation channel used to be a stimulus channel.
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
dipole : bool
Dipole time course channels.
gof : bool
Dipole goodness of fit channels.
bio : bool
Bio channels.
ecog : bool
Electrocorticography channels.
fnirs : bool | str
Functional near-infrared spectroscopy channels. If True include all
fNIRS channels. If False (default) include none. If string it can
be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
include channels measuring deoxyhemoglobin).
include : list of string
List of additional channels to include. If empty do not include
any.
exclude : list of string | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of string
Restrict sensor channels (MEG, EEG) to this list of channel names.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
Notes
-----
.. versionadded:: 0.9.0
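        Examples
        --------
        An illustrative call on a hypothetical ``raw`` instance::
            >>> raw.pick_types(meg=False, eeg=True, eog=True, exclude='bads')  # doctest: +SKIP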
"""
idx = pick_types(
self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
ecog=ecog, fnirs=fnirs, include=include, exclude=exclude,
selection=selection)
self._pick_drop_channels(idx)
return self
def pick_channels(self, ch_names):
"""Pick some channels.
Parameters
----------
ch_names : list
The list of channels to select.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
Notes
-----
.. versionadded:: 0.9.0
"""
_check_excludes_includes(ch_names)
idx = [self.ch_names.index(c) for c in ch_names if c in self.ch_names]
self._pick_drop_channels(idx)
return self
def drop_channels(self, ch_names):
"""Drop some channels.
Parameters
----------
ch_names : list
List of the names of the channels to remove.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
pick_channels
Notes
-----
.. versionadded:: 0.9.0
"""
msg = ("'ch_names' should be a list of strings (the name[s] of the "
"channel to be dropped), not a {0}.")
if isinstance(ch_names, string_types):
raise ValueError(msg.format("string"))
else:
if not all([isinstance(ch_name, string_types)
for ch_name in ch_names]):
raise ValueError(msg.format(type(ch_names[0])))
missing = [ch_name for ch_name in ch_names
if ch_name not in self.ch_names]
if len(missing) > 0:
msg = "Channel(s) {0} not found, nothing dropped."
raise ValueError(msg.format(", ".join(missing)))
bad_idx = [self.ch_names.index(ch_name) for ch_name in ch_names
if ch_name in self.ch_names]
idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
self._pick_drop_channels(idx)
return self
def _pick_drop_channels(self, idx):
# avoid circular imports
from ..io.base import _check_preload
from ..time_frequency import AverageTFR
_check_preload(self, 'adding or dropping channels')
if getattr(self, 'picks', None) is not None:
self.picks = self.picks[idx]
if hasattr(self, '_cals'):
self._cals = self._cals[idx]
pick_info(self.info, idx, copy=False)
if getattr(self, '_projector', None) is not None:
self._projector = self._projector[idx][:, idx]
if self.preload:
# All others (Evoked, Epochs, Raw) have chs axis=-2
axis = -3 if isinstance(self, AverageTFR) else -2
self._data = self._data.take(idx, axis=axis)
def add_channels(self, add_list, force_update_info=False):
"""Append new channels to the instance.
Parameters
----------
add_list : list
A list of objects to append to self. Must contain all the same
type as the current object
force_update_info : bool
If True, force the info for objects to be appended to match the
values in `self`. This should generally only be used when adding
stim channels for which important metadata won't be overwritten.
.. versionadded:: 0.12
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
"""
# avoid circular imports
from ..io import BaseRaw, _merge_info
from ..epochs import BaseEpochs
if not isinstance(add_list, (list, tuple)):
raise AssertionError('Input must be a list or tuple of objs')
# Object-specific checks
if not all([inst.preload for inst in add_list] + [self.preload]):
raise AssertionError('All data must be preloaded')
if isinstance(self, BaseRaw):
con_axis = 0
comp_class = BaseRaw
elif isinstance(self, BaseEpochs):
con_axis = 1
comp_class = BaseEpochs
else:
con_axis = 0
comp_class = type(self)
if not all(isinstance(inst, comp_class) for inst in add_list):
raise AssertionError('All input data must be of same type')
data = [inst._data for inst in [self] + add_list]
# Make sure that all dimensions other than channel axis are the same
compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
shapes = np.array([dat.shape for dat in data])[:, compare_axes]
if not ((shapes[0] - shapes) == 0).all():
raise AssertionError('All dimensions except channels must match')
# Create final data / info objects
data = np.concatenate(data, axis=con_axis)
infos = [self.info] + [inst.info for inst in add_list]
new_info = _merge_info(infos, force_update_to_first=force_update_info)
# Now update the attributes
self._data = data
self.info = new_info
if isinstance(self, BaseRaw):
self._cals = np.concatenate([getattr(inst, '_cals')
for inst in [self] + add_list])
return self
class InterpolationMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
def interpolate_bads(self, reset_bads=True, mode='accurate'):
"""Interpolate bad MEG and EEG channels.
Operates in place.
Parameters
----------
reset_bads : bool
If True, remove the bads from info.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used for interpolation of MEG
channels.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
Notes
-----
.. versionadded:: 0.9.0
"""
from .interpolation import _interpolate_bads_eeg, _interpolate_bads_meg
if getattr(self, 'preload', None) is False:
raise ValueError('Data must be preloaded.')
_interpolate_bads_eeg(self)
_interpolate_bads_meg(self, mode=mode)
if reset_bads is True:
self.info['bads'] = []
return self
def rename_channels(info, mapping):
"""Rename channels.
Parameters
----------
info : dict
Measurement info.
mapping : dict | callable
a dictionary mapping the old channel to a new channel name
e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
that takes and returns a string (new in version 0.10.0).
"""
info._check_consistency()
bads = list(info['bads']) # make our own local copies
ch_names = list(info['ch_names'])
# first check and assemble clean mappings of index and name
if isinstance(mapping, dict):
orig_names = sorted(list(mapping.keys()))
missing = [orig_name not in ch_names for orig_name in orig_names]
if any(missing):
raise ValueError("Channel name(s) in mapping missing from info: "
"%s" % np.array(orig_names)[np.array(missing)])
new_names = [(ch_names.index(ch_name), new_name)
for ch_name, new_name in mapping.items()]
elif callable(mapping):
new_names = [(ci, mapping(ch_name))
for ci, ch_name in enumerate(ch_names)]
else:
raise ValueError('mapping must be callable or dict, not %s'
% (type(mapping),))
# check we got all strings out of the mapping
if any(not isinstance(new_name[1], string_types)
for new_name in new_names):
raise ValueError('New channel mapping must only be to strings')
# do the remapping locally
for c_ind, new_name in new_names:
for bi, bad in enumerate(bads):
if bad == ch_names[c_ind]:
bads[bi] = new_name
ch_names[c_ind] = new_name
# check that all the channel names are unique
if len(ch_names) != len(np.unique(ch_names)):
raise ValueError('New channel names are not unique, renaming failed')
    # do the remapping in info
info['bads'] = bads
for ch, ch_name in zip(info['chs'], ch_names):
ch['ch_name'] = ch_name
info._update_redundant()
info._check_consistency()
def _recursive_flatten(cell, dtype):
"""Unpack mat files in Python."""
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
def read_ch_connectivity(fname, picks=None):
"""Parse FieldTrip neighbors .mat file.
More information on these neighbor definitions can be found on the
related FieldTrip documentation pages:
http://fieldtrip.fcdonders.nl/template/neighbours
Parameters
----------
fname : str
The file name. Example: 'neuromag306mag', 'neuromag306planar',
'ctf275', 'biosemi64', etc.
picks : array-like of int, shape (n_channels,)
The indices of the channels to include. Must match the template.
Defaults to None.
Returns
-------
ch_connectivity : scipy.sparse matrix
The connectivity matrix.
ch_names : list
The list of channel names present in connectivity matrix.
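    Examples
    --------
    A sketch using one of the FieldTrip template names listed above::
        >>> connectivity, ch_names = read_ch_connectivity('neuromag306mag')  # doctest: +SKIP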
"""
from scipy.io import loadmat
if not op.isabs(fname):
templates_dir = op.realpath(op.join(op.dirname(__file__),
'data', 'neighbors'))
templates = os.listdir(templates_dir)
for f in templates:
if f == fname:
break
if f == fname + '_neighb.mat':
fname += '_neighb.mat'
break
else:
raise ValueError('I do not know about this neighbor '
'template: "{}"'.format(fname))
fname = op.join(templates_dir, fname)
nb = loadmat(fname)['neighbours']
ch_names = _recursive_flatten(nb['label'], string_types)
neighbors = [_recursive_flatten(c, string_types) for c in
nb['neighblabel'].flatten()]
assert len(ch_names) == len(neighbors)
if picks is not None:
if max(picks) >= len(ch_names):
raise ValueError('The picks must be compatible with '
'channels. Found a pick ({}) which exceeds '
'the channel range ({})'
.format(max(picks), len(ch_names)))
connectivity = _ch_neighbor_connectivity(ch_names, neighbors)
if picks is not None:
# picking before constructing matrix is buggy
connectivity = connectivity[picks][:, picks]
ch_names = [ch_names[p] for p in picks]
return connectivity, ch_names
def _ch_neighbor_connectivity(ch_names, neighbors):
"""Compute sensor connectivity matrix.
Parameters
----------
ch_names : list of str
The channel names.
neighbors : list of list
A list of list of channel names. The neighbors to
which the channels in ch_names are connected with.
Must be of the same length as ch_names.
Returns
-------
ch_connectivity : scipy.sparse matrix
The connectivity matrix.
"""
if len(ch_names) != len(neighbors):
raise ValueError('`ch_names` and `neighbors` must '
'have the same length')
set_neighbors = set([c for d in neighbors for c in d])
rest = set(ch_names) - set_neighbors
if len(rest) > 0:
raise ValueError('Some of your neighbors are not present in the '
'list of channel names')
for neigh in neighbors:
if (not isinstance(neigh, list) and
not all(isinstance(c, string_types) for c in neigh)):
raise ValueError('`neighbors` must be a list of lists of str')
ch_connectivity = np.eye(len(ch_names), dtype=bool)
    for ii, neighs in enumerate(neighbors):
        ch_connectivity[ii, [ch_names.index(i) for i in neighs]] = True
ch_connectivity = sparse.csr_matrix(ch_connectivity)
return ch_connectivity
def fix_mag_coil_types(info):
"""Fix magnetometer coil types.
Parameters
----------
info : dict
The info dict to correct. Corrections are done in-place.
Notes
-----
This function changes magnetometer coil types 3022 (T1: SQ20483N) and
3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
records in the info structure.
Neuromag Vectorview systems can contain magnetometers with two
different coil sizes (3022 and 3023 vs. 3024). The systems
incorporating coils of type 3024 were introduced last and are used at
the majority of MEG sites. At some sites with 3024 magnetometers,
the data files have still defined the magnetometers to be of type
3022 to ensure compatibility with older versions of Neuromag software.
In the MNE software as well as in the present version of Neuromag
software coil type 3024 is fully supported. Therefore, it is now safe
to upgrade the data files to use the true coil type.
.. note:: The effect of the difference between the coil sizes on the
current estimates computed by the MNE software is very small.
Therefore the use of mne_fix_mag_coil_types is not mandatory.
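    Examples
    --------
    An illustrative call on a hypothetical ``raw`` instance::
        >>> fix_mag_coil_types(raw.info)  # doctest: +SKIP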
"""
old_mag_inds = _get_T1T2_mag_inds(info)
for ii in old_mag_inds:
info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
logger.info('%d of %d T1/T2 magnetometer types replaced with T3.' %
(len(old_mag_inds), len(pick_types(info, meg='mag'))))
info._check_consistency()
def _get_T1T2_mag_inds(info):
"""Find T1/T2 magnetometer coil types."""
picks = pick_types(info, meg='mag')
old_mag_inds = []
for ii in picks:
ch = info['chs'][ii]
if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
FIFF.FIFFV_COIL_VV_MAG_T2):
old_mag_inds.append(ii)
return old_mag_inds
| bsd-3-clause |
kinglogxzl/rqalpha | build/lib/rqalpha/analyser/simulation_exchange.py | 2 | 16317 | # -*- coding: utf-8 -*-
#
# Copyright 2016 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import pandas as pd
from collections import defaultdict, OrderedDict
from six import iteritems
from ..const import ORDER_STATUS
from .. import const
from ..account import Account
from ..i18n import gettext as _
from ..logger import user_log
from .order import Order
from .order_style import MarketOrder, LimitOrder
from .portfolio import Portfolio, Dividend
from .risk_cal import RiskCal
from .trade import Trade
class SimuExchange(object):
def __init__(self, data_proxy, trading_params, **kwargs):
self.data_proxy = data_proxy
self.trading_params = trading_params
self.dt = None # type: datetime.datetime current simulation datetime
# TODO move risk cal outside this class
self.risk_cal = RiskCal(trading_params, data_proxy)
self.daily_portfolios = OrderedDict() # type: Dict[str, Portfolio], each day has a portfolio copy
self.all_orders = {} # type: Dict[str, Order], all orders, including cancel orders
self.open_orders = defaultdict(list) # type: Dict[str, List[Order]], all open orders
self.start_date = start_date = self.trading_params.trading_calendar[0].date()
self.account = Account(start_date=start_date, init_cash=self.trading_params.init_cash)
# TODO should treat benchmark as a buy and hold strategy
self.benchmark_portfolio_value = self.benchmark_cash = self.trading_params.init_cash
self.benchmark_market_value = 0
self.benchmark_quantity = 0
self.last_date = None # type: datetime.date, last trading date
self.simu_days_cnt = 0 # type: int, days count since simulation start
def on_dt_change(self, dt):
if dt.date() != self.current_date:
self.last_date = self.current_date
self.dt = dt
@property
def current_date(self):
return self.dt.date() if self.dt else None
def on_day_open(self):
self.handle_dividend_payable()
def on_day_close(self):
self.simu_days_cnt += 1
self.reject_all_open_orders()
trades = self.account.get_trades(self.current_date)
portfolio = self.account.portfolio
positions = portfolio.positions
# update position sellable for T+1
for trade in trades:
position = positions[trade.order_book_id]
position.sellable += trade.amount
self.update_daily_portfolio()
for order_book_id in list(positions.keys()):
position = positions[order_book_id]
if position.quantity == 0:
positions.pop(order_book_id)
# store today portfolio
self.daily_portfolios[self.current_date] = copy.deepcopy(portfolio)
        # TODO make benchmark calculation work better
# update benchmark
if self.benchmark_market_value == 0:
self.benchmark_market_value = None
origin_benchmark_portfolio_value = self.benchmark_portfolio_value
# FIXME quick dirty hack
price = self.data_proxy.get_bar(self.trading_params.benchmark, pd.Timestamp(self.start_date)).close
self.benchmark_quantity = self.benchmark_portfolio_value / price
self.benchmark_quantity = int(self.benchmark_quantity)
trade_price = price
commission = 0.0008 * trade_price * self.benchmark_quantity
self.benchmark_cash -= trade_price * self.benchmark_quantity
self.benchmark_cash -= commission
self.benchmark_market_value = price * self.benchmark_quantity
self.benchmark_portfolio_value = self.benchmark_market_value + self.benchmark_cash
benchmark_daily_returns = self.benchmark_portfolio_value / origin_benchmark_portfolio_value - 1
else:
new_benchmark_market_value = self.data_proxy.get_bar(
self.trading_params.benchmark, pd.Timestamp(self.current_date)).close * self.benchmark_quantity
new_benchmark_portfolio_value = new_benchmark_market_value + self.benchmark_cash
benchmark_daily_returns = new_benchmark_portfolio_value / self.benchmark_portfolio_value - 1
self.benchmark_portfolio_value = new_benchmark_portfolio_value
self.risk_cal.calculate(self.current_date, portfolio.daily_returns, benchmark_daily_returns)
self.handle_dividend_ex_dividend()
def get_yesterday_portfolio(self):
return self.daily_portfolios.get(self.last_date)
def reject_all_open_orders(self):
for order_book_id, order_list in iteritems(self.open_orders):
for order in order_list:
user_log.warn(_("Order Rejected: {order_book_id} can not match, {order_list}").format(
order_book_id=order_book_id,
order_list=order_list,
))
order.mark_rejected(_("market close"))
del order_list[:]
def match_current_orders(self, bar_dict):
trades, close_orders = self.match_orders(bar_dict)
for trade in trades:
self.account.record_new_trade(self.current_date, trade)
self.remove_close_orders(close_orders)
# remove rejected order
rejected_orders = []
for order_book_id, order_list in iteritems(self.open_orders):
for order in order_list:
if order.status == ORDER_STATUS.REJECTED:
rejected_orders.append(order)
self.remove_close_orders(rejected_orders)
def on_bar_close(self, bar_dict):
self.match_orders(bar_dict)
# self.update_portfolio(bar_dict)
def update_daily_portfolio(self):
yesterday_portfolio = self.get_yesterday_portfolio()
portfolio = self.account.portfolio
if yesterday_portfolio is None:
yesterday_portfolio_value = portfolio.starting_cash
else:
yesterday_portfolio_value = yesterday_portfolio.portfolio_value
portfolio.pnl = portfolio.portfolio_value - yesterday_portfolio_value
portfolio.daily_returns = portfolio.pnl / yesterday_portfolio_value
portfolio.total_returns = portfolio.portfolio_value / portfolio.starting_cash - 1
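        # Annualize the cumulative return by compounding over the elapsed calendar
        # days: annualized = (1 + total_returns) ** (DAYS_A_YEAR / elapsed_days) - 1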
portfolio.annualized_returns = (1 + portfolio.total_returns) ** (
const.DAYS_CNT.DAYS_A_YEAR / float((self.current_date - self.trading_params.start_date).days + 1)) - 1
def update_portfolio(self, bar_dict):
portfolio = self.account.portfolio
positions = portfolio.positions
for order_book_id, position in iteritems(positions):
position.market_value = position.quantity * bar_dict[order_book_id].close
portfolio.market_value = sum(position.market_value for order_book_id, position in iteritems(positions))
portfolio.portfolio_value = portfolio.market_value + portfolio.cash
for order_book_id, position in iteritems(positions):
position.value_percent = position.market_value / portfolio.portfolio_value
def create_order(self, bar_dict, order_book_id, amount, style):
if style is None:
style = MarketOrder()
order = Order(self.dt, order_book_id, amount, style)
self.open_orders[order_book_id].append(order)
self.all_orders[order.order_id] = order
        # match orders here because ricequant does this
self.match_current_orders(bar_dict)
self.update_portfolio(bar_dict)
return order
def cancel_order(self, order_id):
order = self.get_order(order_id)
if order in self.open_orders[order.order_book_id]:
order.cancel()
def remove_close_orders(self, close_orders):
for order in close_orders:
order_list = self.open_orders[order.order_book_id]
try:
order_list.remove(order)
except ValueError:
pass
def get_order(self, order_id):
return self.all_orders[order_id]
def match_orders(self, bar_dict):
# TODO abstract Matching Engine
trades = []
close_orders = []
portfolio = self.account.portfolio
positions = portfolio.positions
slippage_decider = self.account.slippage_decider
commission_decider = self.account.commission_decider
tax_decider = self.account.tax_decider
data_proxy = self.data_proxy
for order_book_id, order_list in iteritems(self.open_orders):
# TODO handle limit order
for order in order_list:
# TODO check whether can match
is_pass, reason = self.validate_order(bar_dict, order)
if not is_pass:
order.mark_rejected(reason)
user_log.error(reason)
continue
trade_price = slippage_decider.get_trade_price(data_proxy, order)
amount = order.quantity
trade = Trade(
date=order.dt,
order_book_id=order_book_id,
price=trade_price,
amount=order.quantity,
order_id=order.order_id,
commission=0.,
)
commission = commission_decider.get_commission(order, trade)
trade.commission = commission
tax = tax_decider.get_tax(order, trade)
trade.tax = tax
# deduct available cash
portfolio.cash -= trade_price * amount
# cal commisssion & tax
portfolio.cash -= commission
portfolio.cash -= tax
portfolio.total_commission += commission
portfolio.total_tax += tax
# update order
                # TODO simulate creating more trades
order.filled_shares = order.quantity
close_orders.append(order)
trades.append(trade)
# update position
position = positions[order_book_id]
position.quantity += trade.amount
if trade.amount > 0:
position.bought_quantity += trade.amount
position.bought_value += trade_price * amount
else:
position.sold_quantity += abs(trade.amount)
position.sold_value += abs(trade_price * amount)
return trades, close_orders
def validate_order(self, bar_dict, order):
# TODO need to be abstract as a validator
order_book_id = order.order_book_id
portfolio = self.account.portfolio
positions = portfolio.positions
position = positions[order_book_id]
bar = bar_dict[order_book_id]
amount = order.quantity
close_price = bar.close
price = self.account.slippage_decider.get_trade_price(self.data_proxy, order)
cost_money = price * amount
is_buy = amount > 0
# check whether is trading
if not bar.is_trading:
return False, _("Order Rejected: {order_book_id} is not trading.").format(
order_book_id=order_book_id,
)
# handle limit order
if self.trading_params.frequency == "1d":
if isinstance(order.style, LimitOrder):
limit_price = order.style.get_limit_price(is_buy)
if is_buy and limit_price < bar.close:
return False, _("Order Rejected: price is too low to buy {order_book_id}").format(
order_book_id=order_book_id)
elif not is_buy and limit_price > bar.close:
return False, _("Order Rejected: price is too high to sell {order_book_id}").format(
order_book_id=order_book_id)
else:
raise NotImplementedError
# check amount
if abs(amount) < int(self.data_proxy.instrument(order_book_id).round_lot):
return False, _("Order Rejected: amount must over 100 for {order_book_id} ").format(
order_book_id=order_book_id,
)
# check money is enough
if is_buy and close_price * amount > self.account.portfolio.cash:
return False, _("Order Rejected: no enough money to buy {order_book_id}, needs {cost_money:.2f}, cash {cash:.2f}").format(
order_book_id=order_book_id,
cost_money=cost_money,
cash=portfolio.cash,
)
if order.quantity < 0 and abs(order.quantity) > position.sellable:
return False, _("Order Rejected: no enough stock {order_book_id} to sell, you want to sell {quantity}, sellable {sellable}").format(
order_book_id=order_book_id,
quantity=abs(order.quantity),
sellable=position.sellable,
)
# # TODO check whether is limit up or limit down
# # FIXME need to handle ST 5%
# last_close = self.data_proxy.history(order_book_id, 2, "1d", "close").iloc[-2]
# if is_buy and price >= last_close * 1.1:
# return False, _("Order Rejected: {order_book_id} is limit up.").format(
# order_book_id=order_book_id,
# )
# elif not is_buy and price <= last_close * 0.9:
# return False, _("Order Rejected: {order_book_id} is limit down.").format(
# order_book_id=order_book_id,
# )
# TODO check volume is over 25%
# FIXME might have mulitiple order
if amount > bar.volume * 0.25:
return False, _("Order Rejected: {order_book_id} volume is over 25%.").format(
order_book_id=order_book_id,
)
return True, None
def handle_dividend_ex_dividend(self):
data_proxy = self.data_proxy
portfolio = self.account.portfolio
for order_book_id, position in iteritems(portfolio.positions):
dividend_series = data_proxy.get_dividends_by_book_date(order_book_id, self.current_date)
if dividend_series is None:
continue
dividend_per_share = dividend_series["dividend_cash_before_tax"] / dividend_series["round_lot"]
portfolio._dividend_info[order_book_id] = Dividend(order_book_id, position.quantity, dividend_series)
portfolio.dividend_receivable += dividend_per_share * position.quantity
def handle_dividend_payable(self):
"""handle dividend payable before trading
"""
data_proxy = self.data_proxy
portfolio = self.account.portfolio
to_delete_dividend = []
for order_book_id, dividend_info in iteritems(portfolio._dividend_info):
dividend_series = dividend_info.dividend_series
if pd.Timestamp(self.current_date) == pd.Timestamp(dividend_series.payable_date):
dividend_per_share = dividend_series["dividend_cash_before_tax"] / dividend_series["round_lot"]
if dividend_per_share > 0 and dividend_info.quantity > 0:
dividend_cash = dividend_per_share * dividend_info.quantity
portfolio.dividend_receivable -= dividend_cash
portfolio.cash += dividend_cash
# user_log.info(_("get dividend {dividend} for {order_book_id}").format(
# dividend=dividend_cash,
# order_book_id=order_book_id,
# ))
to_delete_dividend.append(order_book_id)
for order_book_id in to_delete_dividend:
portfolio._dividend_info.pop(order_book_id, None)
| apache-2.0 |
rlnsanz/delft | delft/delft.py | 1 | 61818 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Randal S. Olson
This file is part of the TPOT library.
The TPOT library is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
The TPOT library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
the TPOT library. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import print_function
import argparse
import random
import hashlib
import inspect
import sys
from functools import partial
from collections import Counter
import numpy as np
import pandas as pd
from math import ceil
from keras import regularizers
from keras.layers import Input, LSTM, RepeatVector, Dense
from keras.models import Model
from sklearn.preprocessing import Imputer, StandardScaler, RobustScaler, MaxAbsScaler, MinMaxScaler
from sklearn.cross_validation import train_test_split
import warnings
from update_checker import update_check
from _version import __version__
from export_utils import unroll_nested_fuction_calls, generate_import_code, replace_function_calls
from decorators import _gp_new_generation
import deap
from deap import algorithms, base, creator, tools, gp
from tqdm import tqdm
class Activation(object): pass
class Activity_Regularizer(object): pass
class Activity_Regularization_Parameter(object): pass
class Bool(object): pass
class Expansion_Factor(object): pass
class Compression_Factor(object): pass
class Encoded_DF(object): pass
class Classified_DF(Encoded_DF): pass
class Dropout_Rate(object): pass
class Imputed_DF(object): pass
class Optimizer(object): pass
class Output_DF(object): pass
class Scaled_DF(object): pass
class Strategy(object): pass
class Autoencoder(object):
def __init__(self, compression_factor,
encoder_activation, decoder_activation,
optimizer, activity_regularizer, dropout_rate, non_feature_columns, nb_epoch=50):
"""
Initializes one layer of an artificial neural network
Parameters
----------
compression_factor: float
The number of neurons in this layer is determined by dividing the number of neurons in the previous
layer by the compression factor. A compression factor greater than 0 and less than 1 causes an expansion.
encoder_activation: Activation
The activation function of this layer
decoder_activation: Activation
The activation function of the decoding layer that corresponds to this encoding layer in a
stacked autoencoder architecture. Used in pre-training a neural network.
optimizer: Optimizer
The optimization method to control the gradient steps of the algorithm.
Only the optimizer in the first layer is used.
activity_regularizer: Activity_Regularizer
The activity regularizer to help the algorithm find simpler models and generalize better.
dropout_rate: 0 <= float <= 1
Percentage of the input values of this layer that will be set to zero.
nb_epoch: int (Default: 50)
Number of epochs for pre-training and training this layer.
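        Example
        -------
        A minimal construction sketch. The activation and optimizer strings are
        standard Keras names, and the column list is an assumed set of TPOT
        non-feature columns rather than values required by this class.
            layer = Autoencoder(compression_factor=2, encoder_activation='relu',
                                decoder_activation='sigmoid', optimizer='adadelta',
                                activity_regularizer=None, dropout_rate=0.1,
                                non_feature_columns=['class', 'group', 'guess'])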
"""
self.compression_factor = compression_factor
self.encoder_activation = encoder_activation
self.decoder_activation = decoder_activation
self.nb_epoch = nb_epoch
self.optimizer = optimizer
self.activity_regularizer = activity_regularizer
self.dropout_rate = dropout_rate
self.non_feature_columns = non_feature_columns
def start_encoder(self, train_df, validate_df):
"""
Creates the first hidden layer in a stacked autoencoder or neural network, and connects it to the input
layer. This method also instantiates the Input class from Keras, and must be called before calling
the `stack_encoder` method of this class.
Parameters
----------
train_df: pandas.DataFrame
training set.
validate_df: pandas.DataFrame
validation set.
        Note that the number of non-feature columns that TPOT adds to train_df and validate_df
        (for example "guess", "class", and "group") is not a parameter of this method; it is
        computed as ``len(self.non_feature_columns)`` when this method is called.
Returns
-------
None
"""
try:
self.train_df = train_df
self.validate_df = validate_df
nbr_columns = train_df.shape[1] - len(self.non_feature_columns)
self.train_df_noisy = train_df.copy(deep=True).drop(self.non_feature_columns, axis=1).astype(np.float64)
self.__dropout__(self.train_df_noisy, ceil(nbr_columns*self.dropout_rate))
self.validate_df_noisy = validate_df.copy(deep=True).drop(self.non_feature_columns, axis=1).astype(np.float64)
self.__dropout__(self.validate_df_noisy, ceil(nbr_columns*self.dropout_rate))
self.nbr_columns = nbr_columns
_input = Input(shape=(nbr_columns,))
self.input = _input
if self.compression_factor == 0:
# Behavior undefined. Do not change feature space.
self.compression_factor = 1
# encoding_dim is the number of columns in this layer, nbr_columns is the number of columns in the previous
# layer.
encoding_dim = ceil(nbr_columns / self.compression_factor)
self.encoding_dim = encoding_dim
code_layer = Dense(encoding_dim, activation=self.encoder_activation,
activity_regularizer=self.activity_regularizer)(_input)
self.code_layer = code_layer
except Exception as e:
print(e)
def stack_encoder(self, nbr_columns, code_layer, _input):
"""
Creates any hidden layer, except the first hidden layer, in a stacked autoencoder or neural network,
and connects the previous layer to this layer.
For creating the first layer, see Autoencoder.start_encoder.
Parameters
----------
nbr_columns: int
The number of columns in the previous layer.
code_layer: Keras.Dense
The hidden layer immediately preceding this layer. code_layer will pass its activation to this layer.
_input: Keras.Input
The input layer created by `Autoencoder.start_encoder`
Returns
-------
None
"""
try:
self.input = _input
self.nbr_columns = nbr_columns
if self.compression_factor == 0:
# Behavior undefined. Do not change feature space.
self.compression_factor = 1
# encoding_dim is the number of columns in this layer
encoding_dim = ceil(nbr_columns / self.compression_factor)
self.encoding_dim = encoding_dim
code_layer = Dense(encoding_dim, activation=self.encoder_activation,
activity_regularizer=self.activity_regularizer)(code_layer)
self.code_layer = code_layer
except Exception as e:
print(e)
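# Minimal chaining sketch (hypothetical layer objects, assuming the constructor arguments
# shown above): the first layer is built with start_encoder, and every later layer consumes
# the previous layer's (encoding_dim, code_layer, input) tuple:
#   first = Autoencoder(compression_factor=2, ...); first.start_encoder(train_df, validate_df)
#   second = Autoencoder(compression_factor=2, ...)
#   second.stack_encoder(first.encoding_dim, first.code_layer, first.input)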
def __dropout__(self, input_df, nbr_drop_columns):
"""
Zeroes out n columns per example in input_df, where n is the number specified by nbr_drop_columns.
The columns to be zeroed out are chosen independently at random.
Parameters
----------
input_df: pandas.DataFrame
The input dataset
nbr_drop_columns: int
Number of columns per example to zero out
Returns
-------
None
"""
columns = [j for j in input_df.columns]
for i in input_df.index:
np.random.shuffle(columns)
zero_indices = columns[0:nbr_drop_columns]
input_df.loc[i, zero_indices] = 0
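# Worked example (hypothetical numbers): with 10 feature columns and dropout_rate=0.2,
# ceil(10 * 0.2) = 2 column names are shuffled to the front and zeroed for each row, so
# every example loses a different random pair of features.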
class TPOT(object):
"""TPOT automatically creates and optimizes machine learning pipelines using genetic programming."""
update_checked = False
def __init__(self, population_size=100, generations=100,
mutation_rate=0.9, crossover_rate=0.05,
random_state=0, verbosity=0, scoring_function=None,
disable_update_check=False):
"""Sets up the genetic programming algorithm for pipeline optimization.
Parameters
----------
population_size: int (default: 100)
The number of pipelines in the genetic algorithm population. Must be > 0.
The more pipelines in the population, the slower TPOT will run, but it's also more likely to find better pipelines.
generations: int (default: 100)
The number of generations to run pipeline optimization for. Must be > 0.
The more generations you give TPOT to run, the longer it takes, but it's also more likely to find better pipelines.
mutation_rate: float (default: 0.9)
The mutation rate for the genetic programming algorithm in the range [0.0, 1.0].
This tells the genetic programming algorithm how many pipelines to apply random changes to every generation.
We don't recommend that you tweak this parameter unless you know what you're doing.
crossover_rate: float (default: 0.05)
The crossover rate for the genetic programming algorithm in the range [0.0, 1.0].
This tells the genetic programming algorithm how many pipelines to "breed" every generation.
We don't recommend that you tweak this parameter unless you know what you're doing.
random_state: int (default: 0)
The random number generator seed for TPOT. Use this to make sure that TPOT will give you the same results each time
you run it against the same data set with that seed.
verbosity: int (default: 0)
How much information TPOT communicates while it's running. 0 = none, 1 = minimal, 2 = all
scoring_function: function (default: balanced accuracy)
Function used to evaluate the goodness of a given pipeline for the classification problem. By default, balanced class accuracy is used.
disable_update_check: bool (default: False)
Flag indicating whether the TPOT version checker should be disabled.
Returns
-------
None
"""
# Save params to be recalled later by get_params()
self.params = locals() # Must be placed before any local variable definitions
self.params.pop('self')
# Do not prompt the user to update during this session if they ever disabled the update check
if disable_update_check:
TPOT.update_checked = True
# Prompt the user if their version is out of date
if not disable_update_check and not TPOT.update_checked:
update_check('tpot', __version__)
TPOT.update_checked = True
self._training_testing_data = False
self._optimized_pipeline = None
self._training_features = None
self._training_classes = None
self.population_size = population_size
self.generations = generations
self.mutation_rate = mutation_rate
self.crossover_rate = crossover_rate
self.verbosity = verbosity
self.pbar = None
self.gp_generation = 0
# Columns to always ignore when in an operator
self.non_feature_columns = ['class', 'group', 'guess']
if random_state > 0:
random.seed(random_state)
np.random.seed(random_state)
self._pset = gp.PrimitiveSetTyped('MAIN', [pd.DataFrame], Output_DF)
# Rename pipeline input to "input_df"
self._pset.renameArguments(ARG0='input_df')
# Neural Network operators
self._pset.addPrimitive(self._autoencoder, [Scaled_DF, Expansion_Factor, Activation,
Activation, Optimizer, Dropout_Rate, Activity_Regularizer,
Activity_Regularization_Parameter,
Activity_Regularization_Parameter], Classified_DF)
self._pset.addPrimitive(self._hidden_autoencoder, [Encoded_DF, Compression_Factor, Activation,
Activation, Optimizer, Dropout_Rate, Activity_Regularizer,
Activity_Regularization_Parameter,
Activity_Regularization_Parameter], Encoded_DF)
self._pset.addPrimitive(self._compile_autoencoder, [Encoded_DF], Output_DF)
# Feature preprocessing operators
self._pset.addPrimitive(self._standard_scaler, [Imputed_DF], Scaled_DF)
self._pset.addPrimitive(self._robust_scaler, [Imputed_DF], Scaled_DF)
self._pset.addPrimitive(self._min_max_scaler, [Imputed_DF], Scaled_DF)
self._pset.addPrimitive(self._max_abs_scaler, [Imputed_DF], Scaled_DF)
# Imputer operators
self._pset.addPrimitive(self._imputer, [pd.DataFrame, Strategy], Imputed_DF)
# Terminals
int_terminals = np.concatenate((np.arange(0, 51, 1),
np.arange(60, 110, 10)))
for val in int_terminals:
self._pset.addTerminal(val, int)
float_terminals = np.concatenate(([1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
np.linspace(0., 1., 101),
np.linspace(2., 50., 49),
np.linspace(60., 100., 5)))
for val in float_terminals:
self._pset.addTerminal(val, float)
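# The loop below registers 100 candidate factors in [0.1, 10] for both terminal types;
# expansion factors in [0.33, 1] are added twelve extra times, which appears intended to
# bias the GP's random terminal draws toward layers that preserve or enlarge the feature space.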
for val in np.linspace(0.1, 10, 100):
self._pset.addTerminal(val, Compression_Factor)
self._pset.addTerminal(val, Expansion_Factor)
if val >= 0.33 and val <= 1:
for i in range(12):
self._pset.addTerminal(val, Expansion_Factor)
self._pset.addTerminal(True, Bool)
self._pset.addTerminal(False, Bool)
self._pset.addTerminal("softmax", Activation)
self._pset.addTerminal("softplus", Activation)
self._pset.addTerminal("softsign", Activation)
self._pset.addTerminal("relu", Activation)
self._pset.addTerminal("tanh", Activation)
self._pset.addTerminal("sigmoid", Activation)
self._pset.addTerminal("hard_sigmoid", Activation)
self._pset.addTerminal("linear", Activation)
self._pset.addTerminal("sgd", Optimizer)
self._pset.addTerminal("rmsprop", Optimizer)
self._pset.addTerminal("adagrad", Optimizer)
self._pset.addTerminal("adadelta", Optimizer)
self._pset.addTerminal("adam", Optimizer)
self._pset.addTerminal("adamax", Optimizer)
self._pset.addTerminal(0, Activity_Regularizer)
self._pset.addTerminal(1, Activity_Regularizer)
self._pset.addTerminal(2, Activity_Regularizer)
self._pset.addTerminal(3, Activity_Regularizer)
self._pset.addTerminal("mean", Strategy)
self._pset.addTerminal("median", Strategy)
self._pset.addTerminal("most_frequent", Strategy)
for val in np.concatenate(([1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1], np.linspace(0., 1., 101))):
self._pset.addTerminal(val, Activity_Regularization_Parameter)
for val in np.linspace(0., .65, 101):
self._pset.addTerminal(val, Dropout_Rate)
for i in range(15):
self._pset.addTerminal(0.0, Dropout_Rate)
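# Adding 0.0 fifteen extra times skews random Dropout_Rate draws toward no dropout.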
# Dummies for DEAP mutation, never produce a better pipeline, necessary to avoid halting exception
self._pset.addTerminal([0,0], Classified_DF )
self._pset.addTerminal([0,0], Scaled_DF)
self._pset.addTerminal([0, 0], Imputed_DF)
creator.create('FitnessMulti', base.Fitness, weights=(-1.0, 1.0))
creator.create('Individual', gp.PrimitiveTree, fitness=creator.FitnessMulti)
self._toolbox = base.Toolbox()
self._toolbox.register('expr', self._gen_grow_safe, pset=self._pset, min_=12, max_=500)
self._toolbox.register('individual', tools.initIterate, creator.Individual, self._toolbox.expr)
self._toolbox.register('population', tools.initRepeat, list, self._toolbox.individual)
self._toolbox.register('compile', gp.compile, pset=self._pset)
self._toolbox.register('select', self._combined_selection_operator)
self._toolbox.register('mate', gp.cxOnePoint)
self._toolbox.register('expr_mut', self._gen_grow_safe, min_=3, max_=12)
self._toolbox.register('mutate', self._random_mutation_operator)
self.hof = None
if not scoring_function:
self.scoring_function = self._balanced_accuracy
else:
self.scoring_function = scoring_function
def set_training_classes_vectorized(self, classes_vec):
"""
Call this method before fitting TPOT.
DELFT needs both forms of the class: vector form and scalar form.
This method allows the user to set the vector form of the labels in the training dataset.
Parameters
----------
classes_vec: numpy.array
an n-by-m matrix of n examples and m classes
Every column contains zeroes (0), except for one column, which contains a 1.
Returns
-------
None
"""
self._training_classes_vec = classes_vec
def fit(self, features, classes):
"""Fits a machine learning pipeline that maximizes classification accuracy on the provided data
Uses genetic programming to optimize a machine learning pipeline that
maximizes classification accuracy on the provided `features` and `classes`.
Performs an internal stratified training/testing cross-validation split to avoid
overfitting on the provided data.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
classes: array-like {n_samples}
List of class labels for prediction
Returns
-------
None
"""
try:
# Fitting and not scoring flag.
self._training_testing_data = False
# self._training_classes_vec = (np.arange(max(classes) + 1) == classes[:, None]).astype(np.float32)
# Store the training features and classes for later use
self._training_features = features
self._training_classes = classes
training_testing_data = pd.DataFrame(data=features)
training_testing_data['class'] = classes
new_col_names = {}
for column in training_testing_data.columns.values:
if type(column) != str:
new_col_names[column] = str(column).zfill(10)
training_testing_data.rename(columns=new_col_names, inplace=True)
# Randomize the order of the columns so there is no potential bias introduced by the initial order
# of the columns, e.g., the most predictive features at the beginning or end.
data_columns = list(training_testing_data.columns.values)
np.random.shuffle(data_columns)
training_testing_data = training_testing_data[data_columns]
training_indices, testing_indices = train_test_split(training_testing_data.index,
stratify=training_testing_data['class'].values,
train_size=0.75,
test_size=0.25)
self._training_classes_vec_train = self._training_classes_vec[training_indices]
self._training_classes_vec_test = self._training_classes_vec[testing_indices]
training_testing_data.loc[training_indices, 'group'] = 'training'
training_testing_data.loc[testing_indices, 'group'] = 'testing'
# Default guess: the most frequent class in the training data
most_frequent_training_class = Counter(training_testing_data.loc[training_indices, 'class'].values).most_common(1)[0][0]
training_testing_data.loc[:, 'guess'] = most_frequent_training_class
self._toolbox.register('evaluate', self._evaluate_individual, training_testing_data=training_testing_data)
pop = self._toolbox.population(n=self.population_size)
def pareto_eq(ind1, ind2):
"""Function used to determine whether two individuals are equal on the Pareto front
Parameters
----------
ind1: DEAP individual from the GP population
First individual to compare
ind2: DEAP individual from the GP population
Second individual to compare
Returns
----------
individuals_equal: bool
Boolean indicating whether the two individuals are equal on the Pareto front
"""
return np.all(ind1.fitness.values == ind2.fitness.values)
self.hof = tools.ParetoFront(similar=pareto_eq)
verbose = (self.verbosity == 2)
# Start the progress bar
num_evaluations = self.population_size * (self.generations + 1)
self.pbar = tqdm(total=num_evaluations, unit='pipeline', leave=False,
disable=(not verbose), desc='GP Progress')
pop, _ = algorithms.eaSimple(population=pop, toolbox=self._toolbox, cxpb=self.crossover_rate,
mutpb=self.mutation_rate, ngen=self.generations,
halloffame=self.hof, verbose=False)
# Allow for certain exceptions to signal a premature fit() cancellation
except (KeyboardInterrupt, SystemExit):
pass
finally:
# Close the progress bar
if not isinstance(self.pbar, type(None)): # Standard truthiness checks won't work for tqdm
self.pbar.close()
# Reset gp_generation counter to restore initial state
self.gp_generation = 0
# Store the pipeline with the highest internal testing accuracy
if self.hof:
top_score = 0.
for pipeline in self.hof:
pipeline_score = self._evaluate_individual(pipeline, training_testing_data)[1]
if pipeline_score > top_score:
top_score = pipeline_score
self._optimized_pipeline = pipeline
if self.verbosity >= 1 and self._optimized_pipeline:
if verbose: # Add an extra line of spacing if the progress bar was used
print()
print('Best pipeline: {}'.format(self._optimized_pipeline))
def predict(self, testing_features):
"""Uses the optimized pipeline to predict the classes for a feature set.
Parameters
----------
testing_features: array-like {n_samples, n_features}
Feature matrix of the testing set
Returns
----------
array-like: {n_samples}
Predicted classes for the testing set
"""
if self._optimized_pipeline is None:
raise ValueError('A pipeline has not yet been optimized. Please call fit() first.')
training_data = pd.DataFrame(self._training_features)
training_data['class'] = self._training_classes
training_data['group'] = 'training'
testing_data = pd.DataFrame(testing_features)
testing_data['class'] = 0
testing_data['group'] = 'testing'
training_testing_data = pd.concat([training_data, testing_data])
# Default guess: the most frequent class in the training data
most_frequent_training_class = Counter(self._training_classes).most_common(1)[0][0]
training_testing_data.loc[:, 'guess'] = most_frequent_training_class
new_col_names = {}
for column in training_testing_data.columns.values:
if type(column) != str:
new_col_names[column] = str(column).zfill(10)
training_testing_data.rename(columns=new_col_names, inplace=True)
# Transform the tree expression in a callable function
func = self._toolbox.compile(expr=self._optimized_pipeline)
result = func(training_testing_data)
return result.loc[result['group'] == 'testing', 'guess'].values
def fit_predict(self, features, classes):
"""Convenience function that fits a pipeline then predicts on the provided features
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
classes: array-like {n_samples}
List of class labels for prediction
Returns
----------
array-like: {n_samples}
Predicted classes for the provided features
"""
self.fit(features, classes)
return self.predict(features)
def score(self, testing_features, testing_classes):
"""Estimates the testing accuracy of the optimized pipeline.
Parameters
----------
testing_features: array-like {n_samples, n_features}
Feature matrix of the testing set
testing_classes: array-like {n_samples}
List of class labels for prediction in the testing set
Returns
-------
accuracy_score: float
The estimated test set accuracy
"""
if self._optimized_pipeline is None:
raise ValueError('A pipeline has not yet been optimized. Please call fit() first.')
training_data = pd.DataFrame(self._training_features)
training_data['class'] = self._training_classes
training_data['group'] = 'training'
testing_data = pd.DataFrame(testing_features)
testing_data['class'] = testing_classes
testing_data['group'] = 'testing'
training_testing_data = pd.concat([training_data, testing_data])
self._training_testing_data = True
# Default guess: the most frequent class in the training data
most_frequent_training_class = Counter(self._training_classes).most_common(1)[0][0]
training_testing_data.loc[:, 'guess'] = most_frequent_training_class
new_col_names = {}
for column in training_testing_data.columns.values:
if type(column) != str:
new_col_names[column] = str(column).zfill(10)
training_testing_data.rename(columns=new_col_names, inplace=True)
for pipeline in self.hof:
print("Paretto Front: {}".format(pipeline))
return self._evaluate_individual(self._optimized_pipeline, training_testing_data)[1]
def get_params(self, deep=None):
"""Get parameters for this estimator
This function is necessary for TPOT to work as a drop-in estimator in,
e.g., sklearn.cross_validation.cross_val_score
Parameters
----------
deep: unused
Only implemented to maintain interface for sklearn
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self.params
def export(self, output_file_name):
"""Exports the current optimized pipeline as Python code
Parameters
----------
output_file_name: string
String containing the path and file name of the desired output file
Returns
-------
None
"""
if self._optimized_pipeline is None:
raise ValueError('A pipeline has not yet been optimized. Please call fit() first.')
exported_pipeline = self._optimized_pipeline
# Unroll the nested function calls into serial code. Check export_utils.py for details.
pipeline_list = unroll_nested_fuction_calls(exported_pipeline)
# Have the exported code import all of the necessary modules and functions
pipeline_text = generate_import_code(pipeline_list)
# Replace the function calls with their corresponding Python code. Check export_utils.py for details.
pipeline_text += replace_function_calls(pipeline_list)
with open(output_file_name, 'w') as output_file:
output_file.write(pipeline_text)
def _imputer(self, input_df, strategy):
# http://stackoverflow.com/questions/6736590/fast-check-for-nan-in-numpy
input_data = input_df.drop(self.non_feature_columns, axis=1).values
if np.isnan(np.sum(input_data)):
imputer = Imputer(strategy=strategy)
imputer.fit(input_data)
input_data = imputer.transform(input_data)
input_df_imputed = pd.DataFrame(data=input_data)
input_df_imputed[self.non_feature_columns] = input_df[self.non_feature_columns]
return input_df_imputed
else:
return input_df
def _autoencoder(self, input_df, compression_factor, encoder_activation,
decoder_activation, optimizer, dropout_rate, activity_regularizer,
activity_regularizer_param1, activity_regularizer_param2):
"""
First layer of an artificial neural network
Parameters
----------
input_df: pandas.DataFrame {n_samples, n_features+['class', 'group', 'guess']}
Input dataframe.
compression_factor: Compression_Factor, range (0, 10]
Determines the number of neurons in the first hidden layer.
encoder_activation: Activation
The activation function of this layer
decoder_activation: Activation
The activation function of the decoding layer that corresponds to this encoding layer in a
stacked autoencoder architecture. Used in pre-training a neural network.
optimizer: Optimizer
The optimization method to control the gradient steps of the algorithm.
Only the optimizer in the first layer is used.
dropout_rate: 0 <= float <= 1
Percentage of the input values of this layer that will be set to zero.
activity_regularizer: Activity_Regularizer
The activity regularizer to help the algorithm find simpler models and generalize better.
activity_regularizer_param1: Activity_Regularization_Parameter
First activity regularization parameter
activity_regularizer_param2: Activity_Regularization_Parameter
Second activity regularization parameter. Only used when using l1l2 activity regularization.
Returns
-------
(encoding_dim, code_layer, input), where
encoding_dim: int
Number of neurons in the first layer.
code_layer: Keras.Dense
The first hidden layer.
input: Keras.Input
The input layer.
"""
# reset the encoder stack
# used to build and fit stacked autoencoders with arbitrary number of layers
self.encoder_stack = []
if activity_regularizer == 1:
activity_regularizer = regularizers.activity_l1(activity_regularizer_param1)
elif activity_regularizer == 2:
activity_regularizer = regularizers.activity_l2(activity_regularizer_param1)
elif activity_regularizer == 3:
activity_regularizer = regularizers.activity_l1l2(activity_regularizer_param1, activity_regularizer_param2)
else:
activity_regularizer = None
autoencoder = Autoencoder(compression_factor=compression_factor, encoder_activation=encoder_activation,
decoder_activation=decoder_activation, optimizer=optimizer,
activity_regularizer=activity_regularizer, dropout_rate=dropout_rate,
non_feature_columns=self.non_feature_columns)
train_df = input_df.loc[input_df['group'] == 'training']
test_df = input_df.loc[input_df['group'] == 'testing']
autoencoder.start_encoder(train_df, test_df)
self.encoder_stack.append(autoencoder)
return autoencoder.encoding_dim, autoencoder.code_layer, autoencoder.input
def _hidden_autoencoder(self, input_tuple, compression_factor, encoder_activation,
decoder_activation, optimizer, dropout_rate, activity_regularizer,
activity_regularizer_param1, activity_regularizer_param2):
"""
Any hidden layer, except the first hidden layer, of an artificial neural network.
Parameters
----------
input_tuple: (int, Keras.Dense, Keras.Input)
The output from either the preceding self._autoencoder or the preceding self._hidden_autoencoder
This information is needed to connect the next hidden layer.
compression_factor: Compression_Factor, range (0, 10]
Determines the number of neurons in this hidden layer.
encoder_activation: Activation
The activation function of this layer
decoder_activation: Activation
The activation function of the decoding layer that corresponds to this encoding layer in a
stacked autoencoder architecture. Used in pre-training a neural network.
optimizer: Optimizer
The optimization method to control the gradient steps of the algorithm.
Only the optimizer in the first layer is used.
dropout_rate: 0 <= float <= 1
Percentage of the input values of this layer that will be set to zero.
activity_regularizer: Activity_Regularizer
The activity regularizer to help the algorithm find simpler models and generalize better.
activity_regularizer_param1: Activity_Regularization_Parameter
First activity regularization parameter
activity_regularizer_param2: Activity_Regularization_Parameter
Second activity regularization parameter. Only used when using l1l2 activity regularization.
Returns
-------
(encoding_dim, code_layer, input), where
encoding_dim: int
Number of neurons in this layer.
code_layer: Keras.Dense
This hidden layer.
input: Keras.Input
The input layer (from the first layer).
"""
if isinstance(input_tuple, pd.DataFrame):
# Just as a precaution. In case an evolved pipeline attaches _hidden_autoencoder
# without first attaching an _autoencoder
return self._autoencoder(input_tuple, compression_factor, encoder_activation,
decoder_activation, optimizer, dropout_rate, activity_regularizer,
activity_regularizer_param1, activity_regularizer_param2)
# nbr_columns, code_layer, _input = input_tuple
if activity_regularizer == 1:
activity_regularizer = regularizers.activity_l1(activity_regularizer_param1)
elif activity_regularizer == 2:
activity_regularizer = regularizers.activity_l2(activity_regularizer_param1)
elif activity_regularizer == 3:
activity_regularizer = regularizers.activity_l1l2(activity_regularizer_param1, activity_regularizer_param2)
else:
activity_regularizer = None
autoencoder = Autoencoder(compression_factor=compression_factor, encoder_activation=encoder_activation,
decoder_activation=decoder_activation, optimizer=optimizer,
activity_regularizer=activity_regularizer, dropout_rate=dropout_rate,
non_feature_columns=self.non_feature_columns)
autoencoder.stack_encoder(*input_tuple)
self.encoder_stack.append(autoencoder)
return autoencoder.encoding_dim, autoencoder.code_layer, autoencoder.input
def _compile_autoencoder(self, input_df):
"""
Pre-trains the stacked autoencoder, and then connects the first half of the stacked autoencoder
to a classification layer to convert it into an artificial neural network. This method is necessary
because it pops encoders from a LIFO stack to build their corresponding decoders.
Parameters
----------
input_df: DUMMY
used for strong typing in DEAP.
Returns
-------
input_df: pandas.DataFrame {n_samples, n_features + ['guess', 'group', 'class']}
Returns a DataFrame containing the classification guesses for each example.
"""
if self._training_testing_data:
# This runs when we are scoring the test set.
self._training_classes_vec_train = self._training_classes_vec
optimizer = self.encoder_stack[0].optimizer
train_df = self.encoder_stack[0].train_df
train_data_noisy = self.encoder_stack[0].train_df_noisy
validate_df = self.encoder_stack[0].validate_df
validate_data_noisy = self.encoder_stack[0].validate_df_noisy
nb_epoch = self.encoder_stack[0].nb_epoch * len(self.encoder_stack)
self.encoder_stack.reverse()
encoded_layer = self.encoder_stack[0].code_layer
train_data = train_df.drop(self.non_feature_columns, axis=1).astype(np.float64)
validate_data = validate_df.drop(self.non_feature_columns, axis=1).astype(np.float64)
decoder = None
_input = None
target_layer = None
hashable = []
for autoencoder in self.encoder_stack:
_input = autoencoder.input
if decoder is None:
decoder = Dense(autoencoder.nbr_columns, activation=autoencoder.decoder_activation)(encoded_layer)
target_layer = Dense(len(self._training_classes_vec_train[0]), activation=autoencoder.decoder_activation)(encoded_layer)
else:
decoder = Dense(autoencoder.nbr_columns, activation=autoencoder.decoder_activation)(decoder)
hashable.append(autoencoder.decoder_activation)
# Stacked Autoencoder, reconstructor
model = Model(input=_input, output=decoder)
# Stacked Autoencoder, encoder
encoder = Model(input=_input, output=encoded_layer)
# Artificial Neural Network, classifier
classifier = Model(input=_input, output=target_layer)
# Unsupervised pre-training
model.compile(optimizer=optimizer, loss='binary_crossentropy')
model.fit(train_data_noisy.values, train_data.values, nb_epoch=nb_epoch, batch_size=256, verbose=1,
shuffle=True, validation_data=(validate_data_noisy.values, validate_data.values))
# Supervised training
classifier.compile(optimizer=optimizer, loss='binary_crossentropy')
classifier.fit(train_data.values, np.array(self._training_classes_vec_train), nb_epoch=nb_epoch*5, batch_size=256,
verbose=1, shuffle=True)
# Restore the training dataset into input_df
input_df = train_df.append(validate_df)
input_df = input_df.reset_index(drop=True)
# If there are no features left (i.e., only 'class', 'group', and 'guess' remain in the DF), then there is nothing to do
if len(input_df.columns) == 3:
return input_df
input_df = input_df.copy()
all_features = input_df.drop(self.non_feature_columns, axis=1).values
predictions = classifier.predict(all_features)
guess = np.argmax(predictions, 1)
input_df.loc[:, 'guess'] = guess
# Also store the guesses as a synthetic feature
# sf_hash = '-'.join(sorted(input_df.columns.values))
sf_hash = '-'.join([str(x) for x in input_df.columns.values])
# Use the classifier object's class name in the synthetic feature
sf_hash += '{}'.format(classifier.__class__)
hashable.append(optimizer)
hashable.append(str(nb_epoch))
sf_hash += '-'.join(hashable)
sf_identifier = 'SyntheticFeature-{}'.format(hashlib.sha224(sf_hash.encode('UTF-8')).hexdigest())
input_df.loc[:, sf_identifier] = input_df['guess'].values
return input_df
# @staticmethod
def _standard_scaler(self, input_df):
"""Uses scikit-learn's StandardScaler to scale the features by removing their mean and scaling to unit variance
Parameters
----------
input_df: pandas.DataFrame {n_samples, n_features+['class', 'group', 'guess']}
Input DataFrame to scale
Returns
-------
scaled_df: pandas.DataFrame {n_samples, n_features + ['guess', 'group', 'class']}
Returns a DataFrame containing the scaled features
"""
training_features = input_df.loc[input_df['group'] == 'training'].drop(self.non_feature_columns, axis=1)
if len(training_features.columns.values) == 0:
return input_df.copy()
# The scaler must be fit on only the training data
scaler = StandardScaler(copy=False)
scaler.fit(training_features.values.astype(np.float64))
scaled_features = scaler.transform(input_df.drop(self.non_feature_columns, axis=1).values.astype(np.float64))
for col_num, column in enumerate(input_df.drop(self.non_feature_columns, axis=1).columns.values):
input_df.loc[:, column] = scaled_features[:, col_num]
return input_df.copy()
def _robust_scaler(self, input_df):
"""Uses scikit-learn's RobustScaler to scale the features using statistics that are robust to outliers
Parameters
----------
input_df: pandas.DataFrame {n_samples, n_features+['class', 'group', 'guess']}
Input DataFrame to scale
Returns
-------
scaled_df: pandas.DataFrame {n_samples, n_features + ['guess', 'group', 'class']}
Returns a DataFrame containing the scaled features
"""
training_features = input_df.loc[input_df['group'] == 'training'].drop(self.non_feature_columns, axis=1)
if len(training_features.columns.values) == 0:
return input_df.copy()
# The scaler must be fit on only the training data
scaler = RobustScaler(copy=False)
scaler.fit(training_features.values.astype(np.float64))
scaled_features = scaler.transform(input_df.drop(self.non_feature_columns, axis=1).values.astype(np.float64))
for col_num, column in enumerate(input_df.drop(self.non_feature_columns, axis=1).columns.values):
input_df.loc[:, column] = scaled_features[:, col_num]
return input_df.copy()
def _min_max_scaler(self, input_df):
"""Uses scikit-learn's MinMaxScaler to transform all of the features by scaling them to the range [0, 1]
Parameters
----------
input_df: pandas.DataFrame {n_samples, n_features+['class', 'group', 'guess']}
Input DataFrame to scale
Returns
-------
modified_df: pandas.DataFrame {n_samples, n_features + ['guess', 'group', 'class']}
Returns a DataFrame containing the scaled features
"""
training_features = input_df.loc[input_df['group'] == 'training'].drop(self.non_feature_columns, axis=1)
if len(training_features.columns.values) == 0:
return input_df.copy()
# The feature scaler must be fit on only the training data
mm_scaler = MinMaxScaler(copy=False)
mm_scaler.fit(training_features.values.astype(np.float64))
scaled_features = mm_scaler.transform(input_df.drop(self.non_feature_columns, axis=1).values.astype(np.float64))
modified_df = pd.DataFrame(data=scaled_features)
for non_feature_column in self.non_feature_columns:
modified_df[non_feature_column] = input_df[non_feature_column].values
new_col_names = {}
for column in modified_df.columns.values:
if type(column) != str:
new_col_names[column] = str(column).zfill(10)
modified_df.rename(columns=new_col_names, inplace=True)
return modified_df.copy()
def _max_abs_scaler(self, input_df):
"""Uses scikit-learn's MaxAbsScaler to transform all of the features by scaling them to [0, 1] relative to the feature's maximum value
Parameters
----------
input_df: pandas.DataFrame {n_samples, n_features+['class', 'group', 'guess']}
Input DataFrame to scale
Returns
-------
modified_df: pandas.DataFrame {n_samples, n_features + ['guess', 'group', 'class']}
Returns a DataFrame containing the scaled features
"""
training_features = input_df.loc[input_df['group'] == 'training'].drop(self.non_feature_columns, axis=1)
if len(training_features.columns.values) == 0:
return input_df.copy()
# The feature scaler must be fit on only the training data
ma_scaler = MaxAbsScaler(copy=False)
ma_scaler.fit(training_features.values.astype(np.float64))
scaled_features = ma_scaler.transform(input_df.drop(self.non_feature_columns, axis=1).values.astype(np.float64))
modified_df = pd.DataFrame(data=scaled_features)
for non_feature_column in self.non_feature_columns:
modified_df[non_feature_column] = input_df[non_feature_column].values
new_col_names = {}
for column in modified_df.columns.values:
if type(column) != str:
new_col_names[column] = str(column).zfill(10)
modified_df.rename(columns=new_col_names, inplace=True)
return modified_df.copy()
def _evaluate_individual(self, individual, training_testing_data):
"""Determines the `individual`'s fitness according to its performance on the provided data
Parameters
----------
individual: DEAP individual
A list of pipeline operators and model parameters that can be compiled by DEAP into a callable function
training_testing_data: pandas.DataFrame {n_samples, n_features+['guess', 'group', 'class']}
A DataFrame containing the training and testing data for the `individual`'s evaluation
Returns
-------
fitness: float
Returns a float value indicating the `individual`'s fitness according to its performance on the provided data
"""
try:
# Transform the tree expression in a callable function
func = self._toolbox.compile(expr=individual)
# Count the number of pipeline operators as a measure of pipeline complexity
operator_count = 0
for i in range(len(individual)):
node = individual[i]
if type(node) is deap.gp.Terminal:
continue
if type(node) is deap.gp.Primitive and node.name == '_combine_dfs':
continue
operator_count += 1
result = func(training_testing_data)
result = result[result['group'] == 'testing']
resulting_score = self.scoring_function(result)
except MemoryError:
# Throw out GP expressions that are too large to be compiled in Python
return 5000., 0.
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
# Catch-all: Do not allow one pipeline that crashes to cause TPOT to crash
# Instead, assign the crashing pipeline a poor fitness
print(e)
return 5000., 0.
finally:
if not self.pbar.disable:
self.pbar.update(1) # One more pipeline evaluated
if isinstance(resulting_score, float) or isinstance(resulting_score, np.float64) or isinstance(resulting_score, np.float32):
return max(1, operator_count), resulting_score
else:
raise ValueError('Scoring function does not return a float')
def _balanced_accuracy(self, result):
"""Default scoring function: balanced class accuracy
Parameters
----------
result: pandas.DataFrame {n_samples, n_features+['guess', 'group', 'class']}
A DataFrame containing a pipeline's predictions and the corresponding classes for the testing data
Returns
-------
fitness: float
Returns a float value indicating the `individual`'s balanced accuracy on the testing data
"""
all_classes = list(set(result['class'].values))
all_class_accuracies = []
for this_class in all_classes:
sens_columns = (result['guess'] == this_class) & (result['class'] == this_class)
sens_count = float(len(result[result['class'] == this_class]))
this_class_sensitivity = len(result[sens_columns]) / sens_count
spec_columns = (result['guess'] != this_class) & (result['class'] != this_class)
spec_count = float(len(result[result['class'] != this_class]))
this_class_specificity = len(result[spec_columns]) / spec_count
this_class_accuracy = (this_class_sensitivity + this_class_specificity) / 2.
all_class_accuracies.append(this_class_accuracy)
balanced_accuracy = np.mean(all_class_accuracies)
return balanced_accuracy
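# Worked example (hypothetical counts): for a binary problem where class A has sensitivity
# 0.8 and specificity 0.6, class A scores (0.8 + 0.6) / 2 = 0.7; for class B the two rates
# swap roles, giving (0.6 + 0.8) / 2 = 0.7, so the balanced accuracy returned above is
# mean([0.7, 0.7]) = 0.7.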
@_gp_new_generation
def _combined_selection_operator(self, individuals, k):
"""Perform NSGA2 selection on the population according to their Pareto fitness
Parameters
----------
individuals: list
A list of individuals to perform selection on
k: int
The number of individuals to return from the selection phase
Returns
-------
fitness: list
Returns a list of individuals that were selected
"""
return tools.selNSGA2(individuals, int(k / 5.)) * 5
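# Note: NSGA-II picks only k/5 distinct individuals; repeating that list five times returns
# roughly k pipelines overall, so each survivor appears five times in the next generation.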
def _random_mutation_operator(self, individual):
"""Perform a replacement, insert, or shrink mutation on an individual
Parameters
----------
individual: DEAP individual
A list of pipeline operators and model parameters that can be compiled by DEAP into a callable function
Returns
-------
fitness: list
Returns the individual with one of the mutations applied to it
"""
mutation_techniques = [
partial(gp.mutUniform, expr=self._toolbox.expr_mut, pset=self._pset),
partial(gp.mutInsert, pset=self._pset),
partial(gp.mutShrink)
]
return np.random.choice(mutation_techniques)(individual)
def _gen_grow_safe(self, pset, min_, max_, type_=None):
"""Generate an expression where each leaf might have a different depth
between *min* and *max*.
Parameters
----------
pset: PrimitiveSetTyped
Primitive set from which primitives are selected.
min_: int
Minimum height of the produced trees.
max_: int
Maximum height of the produced trees.
type_: class
The type that should return the tree when called, when
:obj:`None` (default) the type of :pset: (pset.ret)
is assumed.
Returns
-------
individual: list
A grown tree with leaves at possibly different depths.
"""
def condition(height, depth, type_):
"""Expression generation stops when the depth is equal to height
or when it is randomly determined that a node should be a terminal.
"""
# Plus 10 height to enable deep pipelines
return type_ not in [Encoded_DF, Output_DF, Scaled_DF, Imputed_DF] or depth == height
return self._generate(pset, min_, max_, condition, type_)
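# In other words, only the DataFrame-typed slots (Encoded_DF, Output_DF, Scaled_DF,
# Imputed_DF) are allowed to keep growing with primitives; every hyperparameter-typed slot
# terminates immediately with a terminal, and DataFrame slots stop once the sampled height
# is reached.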
# Generate function stolen straight from deap.gp.generate
def _generate(self, pset, min_, max_, condition, type_=None):
"""Generate a Tree as a list of list. The tree is build
from the root to the leaves, and it stop growing when the
condition is fulfilled.
Parameters
----------
pset: PrimitiveSetTyped
Primitive set from which primitives are selected.
min_: int
Minimum height of the produced trees.
max_: int
Maximum height of the produced trees.
condition: function
The condition is a function that takes two arguments,
the height of the tree to build and the current
depth in the tree.
type_: class
The type that should return the tree when called, when
:obj:`None` (default) no return type is enforced.
Returns
-------
individual: list
A grown tree with leaves at possibly different depths
depending on the condition function.
"""
if type_ is None:
type_ = pset.ret
expr = []
height = random.randint(min_, max_)
stack = [(0, type_)]
while len(stack) != 0:
depth, type_ = stack.pop()
# We've added a type_ parameter to the condition function
if condition(height, depth, type_):
try:
term = random.choice(pset.terminals[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError("The gp.generate function tried to add "
"a terminal of type '%s', but there is "
"none available." % (type_,)).with_traceback(traceback)
if inspect.isclass(term):
term = term()
expr.append(term)
else:
try:
prim = random.choice(pset.primitives[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError("The gp.generate function tried to add "
"a primitive of type '%s', but there is "
"none available." % (type_,)).with_traceback(traceback)
expr.append(prim)
for arg in reversed(prim.args):
stack.append((depth+1, arg))
return expr
def positive_integer(value):
"""Ensures that the provided value is a positive integer; throws an exception otherwise
Parameters
----------
value: int
The number to evaluate
Returns
-------
value: int
Returns a positive integer
"""
try:
value = int(value)
except Exception:
raise argparse.ArgumentTypeError('Invalid int value: \'{}\''.format(value))
if value < 0:
raise argparse.ArgumentTypeError('Invalid positive int value: \'{}\''.format(value))
return value
def float_range(value):
"""Ensures that the provided value is a float integer in the range (0., 1.); throws an exception otherwise
Parameters
----------
value: float
The number to evaluate
Returns
-------
value: float
Returns a float in the range [0.0, 1.0]
"""
try:
value = float(value)
except:
raise argparse.ArgumentTypeError('Invalid float value: \'{}\''.format(value))
if value < 0.0 or value > 1.0:
raise argparse.ArgumentTypeError('Invalid float value: \'{}\''.format(value))
return value
def main():
"""Main function that is called when TPOT is run on the command line"""
parser = argparse.ArgumentParser(description='A Python tool that automatically creates and '
'optimizes machine learning pipelines using genetic programming.',
add_help=False)
parser.add_argument('INPUT_FILE', type=str, help='Data file to optimize the pipeline on; ensure that the class label column is labeled as "class".')
parser.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
parser.add_argument('-is', action='store', dest='INPUT_SEPARATOR', default='\t',
type=str, help='Character used to separate columns in the input file.')
parser.add_argument('-o', action='store', dest='OUTPUT_FILE', default='',
type=str, help='File to export the final optimized pipeline.')
parser.add_argument('-g', action='store', dest='GENERATIONS', default=100,
type=positive_integer, help='Number of generations to run pipeline optimization over.\nGenerally, TPOT will work better when '
'you give it more generations (and therefore time) to optimize over. TPOT will evaluate '
'GENERATIONS x POPULATION_SIZE number of pipelines in total.')
parser.add_argument('-p', action='store', dest='POPULATION_SIZE', default=100,
type=positive_integer, help='Number of individuals in the GP population.\nGenerally, TPOT will work better when you give it '
' more individuals (and therefore time) to optimize over. TPOT will evaluate '
'GENERATIONS x POPULATION_SIZE number of pipelines in total.')
parser.add_argument('-mr', action='store', dest='MUTATION_RATE', default=0.9,
type=float_range, help='GP mutation rate in the range [0.0, 1.0]. We recommend using the default parameter unless you '
'understand how the mutation rate affects GP algorithms.')
parser.add_argument('-xr', action='store', dest='CROSSOVER_RATE', default=0.05,
type=float_range, help='GP crossover rate in the range [0.0, 1.0]. We recommend using the default parameter unless you '
'understand how the crossover rate affects GP algorithms.')
parser.add_argument('-s', action='store', dest='RANDOM_STATE', default=0,
type=int, help='Random number generator seed for reproducibility. Set this seed if you want your TPOT run to be reproducible '
'with the same seed and data set in the future.')
parser.add_argument('-v', action='store', dest='VERBOSITY', default=1, choices=[0, 1, 2],
type=int, help='How much information TPOT communicates while it is running: 0 = none, 1 = minimal, 2 = all.')
parser.add_argument('--no-update-check', action='store_true', dest='DISABLE_UPDATE_CHECK', default=False,
help='Flag indicating whether the TPOT version checker should be disabled.')
parser.add_argument('--version', action='version', version='TPOT {version}'.format(version=__version__),
help='Show TPOT\'s version number and exit.')
args = parser.parse_args()
if args.VERBOSITY >= 2:
print('\nTPOT settings:')
for arg in sorted(args.__dict__):
if arg == 'DISABLE_UPDATE_CHECK':
continue
print('{}\t=\t{}'.format(arg, args.__dict__[arg]))
print('')
input_data = pd.read_csv(args.INPUT_FILE, sep=args.INPUT_SEPARATOR)
if 'Class' in input_data.columns.values:
input_data.rename(columns={'Class': 'class'}, inplace=True)
RANDOM_STATE = args.RANDOM_STATE if args.RANDOM_STATE > 0 else None
training_indices, testing_indices = train_test_split(input_data.index,
stratify=input_data['class'].values,
train_size=0.75,
test_size=0.25,
random_state=RANDOM_STATE)
training_features = input_data.loc[training_indices].drop('class', axis=1).values
training_classes = input_data.loc[training_indices, 'class'].values
testing_features = input_data.loc[testing_indices].drop('class', axis=1).values
testing_classes = input_data.loc[testing_indices, 'class'].values
tpot = TPOT(generations=args.GENERATIONS, population_size=args.POPULATION_SIZE,
mutation_rate=args.MUTATION_RATE, crossover_rate=args.CROSSOVER_RATE,
random_state=args.RANDOM_STATE, verbosity=args.VERBOSITY,
disable_update_check=args.DISABLE_UPDATE_CHECK)
tpot.fit(training_features, training_classes)
if args.VERBOSITY >= 1:
print('\nTraining accuracy: {}'.format(tpot.score(training_features, training_classes)))
print('Holdout accuracy: {}'.format(tpot.score(testing_features, testing_classes)))
if args.OUTPUT_FILE != '':
tpot.export(args.OUTPUT_FILE)
if __name__ == '__main__':
main()
| gpl-3.0 |
sapfo/medeas | src/old_scripts/main_simul_eigenvalues_distribution.py | 1 | 7741 | #!/usr/bin/env python
"""
Created Wed Oct 7 15:04:36 CEST 2015
@author: sapfo
"""
import matplotlib
#matplotlib.use('Agg')
import simul_ms
import python_cmdscale
#import python_pca
import exp
import sys
import numpy as np
import pylab as py
from scipy.stats import norm
'''
We want to pick n1, n2, D, T?
Simulate data
Compute the distance matrix
MDS the distance matrix
Get coordinates
Get eigenvalues, eigenvectors
Plot comparing with the other eigenvalues
'''
#################### FIXED #############
n = 30
n1 = 5
n2 = 5
n3 = 5
D = 0.4
D1 = 0.1 #(D1<D)
nreps = 1000
## simulate data
rescaling = 2.0
verbose = False
########### 1 population ##############
print "########### 1 population ##############"
## expected tree length for one population
exp_tree_length = 0
for i in range(2,n+1):
exp_tree_length += 2./(i-1)
nsnps = [100]
T_mds = {}
T_pca = {}
Eigenvalues_mds = []
Distances_noise = []
Expected_Delta = np.zeros((n,n))
for kk in range(1,n):
Expected_Delta += np.eye(n,k=kk)
Expected_Delta += np.eye(n,k=-kk)
Expected_Delta *= 2./exp_tree_length
print Expected_Delta
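# Expected_Delta is the analytical pairwise-distance matrix under the single-population
# coalescent: every off-diagonal entry equals 2/E[total tree length] and the diagonal is
# zero, so Delta - Expected_Delta below isolates the sampling noise of the simulated data.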
for nsnp in nsnps:
T_mds[nsnp] = []
T_pca[nsnp] = []
for iteration in range(nreps):
params,data,tree_lengths = simul_ms.ms_one_pops(n=n,nreps=nsnp,verbose=0)
Delta = simul_ms.distance_matrix(data=data,verbose=0)
if verbose: print "Delta: ",Delta
Diff = Delta - Expected_Delta
if verbose: print "Diff: ",Diff
Distances_noise += list(Diff.flatten())
#Expected_Delta = zeros
evals_mds, evecs_mds, Y_mds = python_cmdscale.cmdscale(Delta)
Eigenvalues_mds += list(evals_mds[:-1])
#evals_pca, evecs_pca, Y_pca = python_pca.PCA(data.T)
#print "params: ",params
if verbose: print "average tree length (computed with ms): ",rescaling*np.average(tree_lengths)
if verbose: print "expected tree length (analytical coal): ",exp_tree_length
# mds expected total tree length, bias, rmse
t_mds = (2./(np.average(evals_mds[:-1])))**(1/2.)
T_mds[nsnp].append(t_mds)
if verbose: print "expected T (mds) from eigenvalues: ",T_mds
# pca expected tree length, bias, rmse
#t_pca = 1./np.average(evals_pca[:-1])
#T_pca[nsnp].append(t_pca)
#if verbose: print "expected T (pca) from eigenvalues: ",T_pca
print "expected lambda1 (mds) for (Ivan analytical): ",2./((exp_tree_length)**2)
#print "expected lambda1 (pca) for (Ivan analytical): ",1./((exp_tree_length))
#print "observed lambda1 (mds procedure): ",evals_mds[0]
#print "observed lambda1 (pca procedure): ",evals_pca[0]
#print "observed average lambdas (mds): ",np.average(evals_mds[:-1])
#print "observed average lambdas (pca): ",np.average(evals_pca[:-1])
#print "evals (first 10): ",evals_mds[:10]
mu1,std1 = norm.fit(Eigenvalues_mds)
mu2,std2 = norm.fit(Distances_noise)
fig = py.figure()
py.suptitle("1 population, %s snps, %s rep"%(nsnp,nreps))
ax1 = fig.add_subplot(2,1,1)
py.title("Eigenvalues")
py.hist(Eigenvalues_mds,normed=True,alpha=0.5)
py.vlines(2./((exp_tree_length)**2),0,10,color='red')
xmin,xmax=py.xlim()
x = np.linspace(xmin,xmax,100)
p = norm.pdf(x,mu1,std1)
py.plot(x,p,'k',linewidth=2)
ax1 = fig.add_subplot(2,1,2)
py.title("Distances")
py.hist(Distances_noise,normed=True,alpha=0.5)
xmin,xmax=py.xlim()
x = np.linspace(xmin,xmax,100)
p = norm.pdf(x,mu2,std2)
py.plot(x,p,'k',linewidth=2)
#py.savefig("Eigenvalues_mds_1pop.pdf")
py.show()
sys.exit()
### plotting one population ###
py.plot(Y[:,0],(Y[:,1]),'o',color='blue')
py.title("simulations 1 population n = %s, nreps = %s "%(n,nreps))
py.xlabel("dim 1 (%.2f %%)"%(1.*evals[0]/np.average(evals[:-1])))
py.ylabel("dim 2 (%.2f %%)"%(1.*evals[1]/np.average(evals[:-1])))
########### 2 populations ##############
print "########### 2 populations ##############"
#ms simul
params_2pops,data_2pops,tree_lengths_2pops = simul_ms.ms_two_pops(n1=n1, n2=n2, D=1./rescaling*D,nreps=nreps,verbose=0)
avg_tree_length_2pops = rescaling*np.average(tree_lengths_2pops)
Delta_2pops = simul_ms.distance_matrix(data=data_2pops,verbose=0)
#cmdscale
evals_2pops, evecs_2pops, Y_2pops = python_cmdscale.cmdscale(Delta_2pops)
exp.T_D_two_pops(eigenvalues = evals_2pops,n1=n1,n2=n2,diploid=2)
# analytical
params_exp_2pops,evals_exp_2pops, evec_exp_2pops = exp.two_pops(n1=n1, n2=n2, D=D, T=avg_tree_length_2pops)
print "params_2pops (ms): ",params_2pops
print "params_exp_2pops: ",params_exp_2pops
print "average tree length (ms): ",rescaling*np.average(tree_lengths_2pops)
#print "expected tree length (coal): ",exp_tree_length
print "expected lambda1 (analytical): ",evals_exp_2pops[0]
print "observed lambda1 (cmdscale): ",evals_2pops[0]
print "expected lambda2 (analytical): ",evals_exp_2pops[1]
print "observed lambda2 (cmdscale): ",evals_2pops[1]
print "average observed lambda2...n-1 (cmdscale): ",np.average(evals_2pops[1:-1])
print evals_exp_2pops[:10]
print evals_2pops[:10]
#print "observed lambda1 (mds): ",evals[0]
#print "observed average lambdas (mds): ",np.average(evals[:-1])
### plotting two population ###
py.figure()
py.plot(Y_2pops[:,0][:n1],Y_2pops[:,1][:n1],'x',color='orange')
py.plot(Y_2pops[:,0][n1:],Y_2pops[:,1][n1:],'o',color='blue')
py.title("simulations 2 pops n1 = %s, n2 = %s, D = %s, nreps = %s "%(n1,n2,D,nreps))
py.xlabel("dim 1")
py.ylabel("dim 2")
#py.xlabel("dim 1 (%.2f %%)"%(1.*evals[0]/np.average(evals[:-1])))
#py.ylabel("dim 2 (%.2f %%)"%(1.*evals[1]/np.average(evals[:-1])))
#py.show()
########### 3 populations ##############
print "########### 3 populations ##############"
nreps = 100
#ms simul
params_3pops,data_3pops,tree_lengths_3pops = simul_ms.ms_three_pops(n1=n1, n2=n2, n3=n3, D=1./rescaling*D, D1 = 1./rescaling*D1,nreps=nreps,verbose=0)
avg_tree_length_3pops = rescaling*np.average(tree_lengths_3pops)
Delta_3pops = simul_ms.distance_matrix(data=data_3pops,verbose=0)
#cmdscale
evals_3pops, evecs_3pops, Y_3pops = python_cmdscale.cmdscale(Delta_3pops)
try:
Texp,Dexp,D1exp,Drescaledexp,D1rescaledexp = exp.T_D_D1_three_pops(eigenvalues = evals_3pops,n1=n1,n2=n2,n3=n3,diploid=2)
except:
Texp,Dexp,D1exp,Drescaledexp,D1rescaledexp= 1,1,1,1,1
print "average tree length (ms): ",rescaling*np.average(tree_lengths_3pops)
print "params_3pops (ms): ",params_3pops
# analytical
params_exp_3pops,evals_exp_3pops, evec_exp_3pops = exp.three_pops(n1=n1, n2=n2, n3=n3, D=D, D1=D1, T=avg_tree_length_3pops)
print "params_3pops (ms): ",params_3pops
print "params_exp_3pops: ",params_exp_3pops
print "average tree length (ms): ",rescaling*np.average(tree_lengths_3pops)
#print "expected tree length (coal): ",exp_tree_length
print "expected lambda1 (analytical): ",evals_exp_3pops[0]
print "observed lambda1 (cmdscale): ",evals_3pops[0]
print ""
print "expected lambda2 (analytical): ",evals_exp_3pops[1]
print "observed lambda2 (cmdscale): ",evals_3pops[1]
print ""
print "expected lambda3 (analytical): ",evals_exp_3pops[2]
print "observed lambda3 (cmdscale): ",evals_3pops[2]
print "average observed lambda3...n-1 (cmdscale): ",np.average(evals_3pops[2:-1])
print evals_exp_3pops[:10]
print evals_3pops[:10]
sys.exit()
### plotting three population ###
py.figure()
py.plot(Y_3pops[:,0][:n1],Y_3pops[:,1][:n1],'D',color='orange')
py.plot(Y_3pops[:,0][n1:n1+n2],Y_3pops[:,1][n1:n1+n2],'o',color='blue')
py.plot(Y_3pops[:,0][n1+n2:],Y_3pops[:,1][n1+n2:],'v',color='green')
py.title("simulations 3 pops n1 = %(n1)s, n2 = %(n2)s, n3 = %(n3)s, D = %(D)s, D1 = %(D1)s, nreps = %(nreps)s "%params_3pops)
py.xlabel("dim 1")
py.ylabel("dim 2")
py.show()
########### 4 populations and above ##############
| gpl-3.0 |
Obus/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
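# Worked example (hypothetical biclusters): a covers 2 rows x 3 columns (size 6), b covers
# 3 rows x 2 columns (size 6), and they overlap on 2 rows x 2 columns (intersection 4),
# giving a Jaccard coefficient of 4 / (6 + 6 - 4) = 0.5.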
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
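# Minimal usage sketch (hypothetical indicator vectors, not part of the module):
#   rows = np.array([[True, True, False], [False, False, True]])
#   cols = np.array([[True, False], [False, True]])
#   consensus_score((rows, cols), (rows, cols))  # identical bicluster sets -> 1.0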
| bsd-3-clause |
pchmieli/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_weights_var_impGBM.py | 1 | 6494 | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
import random
def weights_var_imp():
def check_same(data1, data2, min_rows_scale):
gbm1_regression = H2OGradientBoostingEstimator(min_rows=5,
ntrees=5,
max_depth=5)
gbm1_regression.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="economy",
training_frame=data1)
gbm2_regression = H2OGradientBoostingEstimator(min_rows=5*min_rows_scale,
ntrees=5,
max_depth=5)
gbm2_regression.train(x=["displacement", "power", "weight", "acceleration", "year", "weights"],
y="economy",
training_frame=data2,
weights_column="weights")
gbm1_binomial = H2OGradientBoostingEstimator(min_rows=5,
distribution="bernoulli",
ntrees=5,
max_depth=5)
gbm1_binomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="economy_20mpg",
training_frame=data1)
gbm2_binomial = H2OGradientBoostingEstimator(min_rows=5*min_rows_scale,
distribution="bernoulli",
ntrees=5,
max_depth=5)
gbm2_binomial.train(x=["displacement", "power", "weight", "acceleration", "year", "weights"],
y="economy_20mpg",
training_frame=data2,
weights_column="weights")
gbm1_multinomial = H2OGradientBoostingEstimator(min_rows=5,
distribution="multinomial",
ntrees=5,
max_depth=5)
gbm1_multinomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="cylinders",
training_frame=data1)
gbm2_multinomial = H2OGradientBoostingEstimator(min_rows=5*min_rows_scale,
distribution="multinomial",
ntrees=5,
max_depth=5)
gbm2_multinomial.train(x=["displacement", "power", "weight", "acceleration", "year", "weights"],
y="cylinders",
weights_column="weights", training_frame=data2)
reg1_vi = gbm1_regression.varimp(return_list=True)
reg2_vi = gbm2_regression.varimp(return_list=True)
bin1_vi = gbm1_binomial.varimp(return_list=True)
bin2_vi = gbm2_binomial.varimp(return_list=True)
mul1_vi = gbm1_multinomial.varimp(return_list=True)
mul2_vi = gbm2_multinomial.varimp(return_list=True)
print "Varimp (regresson) no weights vs. weights: {0}, {1}".format(reg1_vi, reg2_vi)
print "Varimp (binomial) no weights vs. weights: {0}, {1}".format(bin1_vi, bin2_vi)
print "Varimp (multinomial) no weights vs. weights: {0}, {1}".format(mul1_vi, mul2_vi)
for rvi1, rvi2 in zip(reg1_vi, reg2_vi): assert rvi1 == rvi2, "Expected vi's (regression) to be the same, but got {0}, and {1}".format(rvi1, rvi2)
for bvi1, bvi2 in zip(bin1_vi, bin2_vi): assert bvi1 == bvi2, "Expected vi's (binomial) to be the same, but got {0}, and {1}".format(bvi1, bvi2)
for mvi1, mvi2 in zip(mul1_vi, mul2_vi): assert mvi1 == mvi2, "Expected vi's (multinomial) to be the same, but got {0}, and {1}".format(mvi1, mvi2)
h2o_cars_data = h2o.import_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
weight = random.randint(1,10)
uniform_weights = [[weight for r in range(406)]]
h2o_uniform_weights = h2o.H2OFrame(python_obj=uniform_weights)
h2o_uniform_weights.set_names(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print "\n\nChecking that using uniform weights is equivalent to no weights:"
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[0 if random.randint(0,1) else 1 for r in range(406)]]
h2o_zero_weights = h2o.H2OFrame(python_obj=zero_weights)
h2o_zero_weights.set_names(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print "\n\nChecking that using some zero weights is equivalent to removing those observations:"
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1 if random.randint(0,1) else 2 for r in range(406)]]
h2o_doubled_weights = h2o.H2OFrame(python_obj=doubled_weights)
h2o_doubled_weights.set_names(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
doubled_data = zip(*doubled_data)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights[0]):
if w == 2: doubled_data.append(doubled_data[idx])
doubled_data = zip(*doubled_data)
h2o_data_doubled = h2o.H2OFrame(python_obj=doubled_data)
h2o_data_doubled.set_names(list(colnames))
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print "\n\nChecking that doubling some weights is equivalent to doubling those observations:"
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_var_imp)
else:
weights_var_imp()
| apache-2.0 |
voxlol/scikit-learn | examples/classification/plot_lda.py | 164 | 2224 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
| bsd-3-clause |
tawsifkhan/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
pernambucano/myo_libras | libras_myo/printGraph.py | 1 | 1520 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from segmentData import readCsv, calculateAverageEnergy
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.utils import column_or_1d
from mpl_toolkits.mplot3d import Axes3D
def readCsvSegmented(inputFileName):
data = np.genfromtxt(inputFileName, delimiter=',')
return data
def print3D(X,labels):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:,0], X[:,1], X[:,2], c=labels, cmap=plt.cm.get_cmap('rainbow'))
ax.set_xlabel('LD 1')
ax.set_ylabel('LD 2')
ax.set_zlabel('LD 3')
plt.show()
def printGraph(X,y):
y = np.array(y)[None].T
plt.axis([-100,100,-100,100])
data = np.append(X, y, 1)
print data
plt.grid(True)
for x,y,c in data:
color = 'b'
if c == 0:
color = 'r'
elif c == 1:
color = 'b'
elif c == 2:
color = 'g'
elif c== 3:
color = 'm'
elif c==4:
color = 'c'
else:
color = 'k'
plt.scatter(x,y, color=color)
# plt.plot(sequence)
# plt.savefig("teste.png")
plt.show()
if __name__ == '__main__':
f1 = readCsv('data/bernardo/bernardo-C-1-emg.csv')
# f2 = readCsvSegmented('data_segmented/sylvia/sylvia-A-2-emg-segmented.csv')
f3 = calculateAverageEnergy(f1)
printGraph(f3)
# printGraph(f2)
# printGraph(f3)
| mit |
jeffhsu3/genda | scripts/read_trace.py | 1 | 3476 | #!/usr/bin/env
import pandas as pd
import numpy as np
import sys
from IPython import embed
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import glob
import random
from collections import defaultdict
# Set seed
try:
import configparser
except ImportError:
import ConfigParser as configparser
# CHUNKSIZE
CHUNKSIZE = 10000
BURN_IN = 400
# COV_mat
def get_genotype(chrom, rsid):
""":TODO switch to reading HDF5
"""
geno_path = ('/home/hsuj/lustre/geno/'
'CCF_1000G_Aug2013_Chr{0}.dose.double.ATB.RNASeq_MEQTL.txt')
geno_gen = pd.read_csv(geno_path.format(str(chrom)),
sep=" ", chunksize = 10000)
for i in geno_gen:
if rsid in i.index:
break
else: pass
return(i.ix[rsid, :])
def main(chunk):
"""
nsamps - number to sample from MCMC posterior
"""
try:
chunk = int(chunk)
except TypeError:
print('Script requires chunk to be an int')
files = glob.glob('/home/hsuj/lustre/mmseq_analysis/remapped/*/*.mmseq.trace_gibbs.gz')
# Split files by genes into different slurm jobs
# number of individuals to test
nindv = len(files)
#
nsamps = 10
best_snps_file = '/home/hsuj/lustre/matrixeQTL_mmseq/best_snps.csv'
best_snps_columns = pd.read_csv(best_snps_file, header=0, sep=",",
index_col=0, nrows=2)
best_snps = pd.read_csv(best_snps_file,
sep=",", index_col=0, header=None)
best_snps.columns = best_snps_columns.columns
phen_file = pd.read_csv('/home/hsuj/Afib/eQTL/Exons/pca_final.csv',
sep=",")
#### Read in genotypes
by_mcmc_sampling = []
for i in range(nindv):
print('Reading in file: {0!s}'.format(str(i)))
tnames = pd.read_csv(open(files[i], 'rb'), compression='gzip',
sep=" ", nrows=1)
# Sigh this should not be needed
collided_indexes = []
new_best_snp_index = []
for transcript in best_snps.index:
try:
collided_indexes.append(tnames.columns.get_loc(transcript))
new_best_snp_index.append(transcript)
except KeyError:
pass
test = pd.read_csv(open(files[i], 'rb'), compression='gzip', sep=" ",
usecols=collided_indexes,
engine='python', encoding='utf-8',
nrows=1024)
rows = random.sample(list(test.index[BURN_IN:]), nsamps)
temp = test.iloc[rows, :].copy()
temp = temp.iloc[chunk, :]
temp.name = files[i].split("/")[-1].split(".")[0]
        by_mcmc_sampling.append(temp)
"""
temp.columns = [files[i].split("/")[-1].split(".")[0] for trans_i in\
range(len(collided_indexes))]
temp.index = pd.Index(np.arange(0, nsamps))
for k, j in enumerate(new_best_snp_index):
tdict[j].append(temp.iloc[:, k])
"""
fig, ax = plt.subplots()
"""
for i, j in tdict.items():
j = pd.concat(j, axis=1)
tdict[i] = j
ax.plot(j.mean(axis=1), j.std(axis=1) , 'o')
"""
fig.savefig('/home/hsuj/lustre/output/sd_mean_mcmc.png', dpi=300)
all_genes_single_sampling = pd.concat(by_mcmc_sampling, axis=1, join='inner')
all_genes_single_sampling.to_csv('/home/hsuj/lustre/matrixeQTL_mmseq/temp/sampling_{0}.txt'.format(chunk))
if __name__ == '__main__':
# argument 1 the chunk to parse
main(sys.argv[1])
| bsd-3-clause |
xuanyuanking/spark | python/pyspark/pandas/tests/test_repr.py | 15 | 7832 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option, option_context
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class ReprTest(PandasOnSparkTestCase):
max_display_count = 23
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("display.max_rows", ReprTest.max_display_count)
@classmethod
def tearDownClass(cls):
reset_option("display.max_rows")
super().tearDownClass()
def test_repr_dataframe(self):
psdf = ps.range(ReprTest.max_display_count)
self.assertTrue("Showing only the first" not in repr(psdf))
self.assert_eq(repr(psdf), repr(psdf.to_pandas()))
psdf = ps.range(ReprTest.max_display_count + 1)
self.assertTrue("Showing only the first" in repr(psdf))
self.assertTrue(
repr(psdf).startswith(repr(psdf.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psdf = ps.range(ReprTest.max_display_count + 1)
self.assert_eq(repr(psdf), repr(psdf.to_pandas()))
def test_repr_series(self):
psser = ps.range(ReprTest.max_display_count).id
self.assertTrue("Showing only the first" not in repr(psser))
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.range(ReprTest.max_display_count + 1).id
self.assertTrue("Showing only the first" in repr(psser))
self.assertTrue(
repr(psser).startswith(repr(psser.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psser = ps.range(ReprTest.max_display_count + 1).id
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.range(ReprTest.max_display_count).id.rename()
self.assertTrue("Showing only the first" not in repr(psser))
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.range(ReprTest.max_display_count + 1).id.rename()
self.assertTrue("Showing only the first" in repr(psser))
self.assertTrue(
repr(psser).startswith(repr(psser.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psser = ps.range(ReprTest.max_display_count + 1).id.rename()
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count)]
).to_series()
self.assertTrue("Showing only the first" not in repr(psser))
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
).to_series()
self.assertTrue("Showing only the first" in repr(psser))
self.assertTrue(
repr(psser).startswith(repr(psser.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psser = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
).to_series()
self.assert_eq(repr(psser), repr(psser.to_pandas()))
def test_repr_indexes(self):
psidx = ps.range(ReprTest.max_display_count).index
self.assertTrue("Showing only the first" not in repr(psidx))
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
psidx = ps.range(ReprTest.max_display_count + 1).index
self.assertTrue("Showing only the first" in repr(psidx))
self.assertTrue(
repr(psidx).startswith(
repr(psidx.to_pandas().to_series().head(ReprTest.max_display_count).index)
)
)
with option_context("display.max_rows", None):
psidx = ps.range(ReprTest.max_display_count + 1).index
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
psidx = ps.MultiIndex.from_tuples([(100 * i, i) for i in range(ReprTest.max_display_count)])
self.assertTrue("Showing only the first" not in repr(psidx))
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
psidx = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
)
self.assertTrue("Showing only the first" in repr(psidx))
self.assertTrue(
repr(psidx).startswith(
repr(psidx.to_pandas().to_frame().head(ReprTest.max_display_count).index)
)
)
with option_context("display.max_rows", None):
psidx = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
)
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
def test_html_repr(self):
psdf = ps.range(ReprTest.max_display_count)
self.assertTrue("Showing only the first" not in psdf._repr_html_())
self.assertEqual(psdf._repr_html_(), psdf.to_pandas()._repr_html_())
psdf = ps.range(ReprTest.max_display_count + 1)
self.assertTrue("Showing only the first" in psdf._repr_html_())
with option_context("display.max_rows", None):
psdf = ps.range(ReprTest.max_display_count + 1)
self.assertEqual(psdf._repr_html_(), psdf.to_pandas()._repr_html_())
def test_repr_float_index(self):
psdf = ps.DataFrame(
{"a": np.random.rand(ReprTest.max_display_count)},
index=np.random.rand(ReprTest.max_display_count),
)
self.assertTrue("Showing only the first" not in repr(psdf))
self.assert_eq(repr(psdf), repr(psdf.to_pandas()))
self.assertTrue("Showing only the first" not in repr(psdf.a))
self.assert_eq(repr(psdf.a), repr(psdf.a.to_pandas()))
self.assertTrue("Showing only the first" not in repr(psdf.index))
self.assert_eq(repr(psdf.index), repr(psdf.index.to_pandas()))
self.assertTrue("Showing only the first" not in psdf._repr_html_())
self.assertEqual(psdf._repr_html_(), psdf.to_pandas()._repr_html_())
psdf = ps.DataFrame(
{"a": np.random.rand(ReprTest.max_display_count + 1)},
index=np.random.rand(ReprTest.max_display_count + 1),
)
self.assertTrue("Showing only the first" in repr(psdf))
self.assertTrue("Showing only the first" in repr(psdf.a))
self.assertTrue("Showing only the first" in repr(psdf.index))
self.assertTrue("Showing only the first" in psdf._repr_html_())
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_repr import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
samuel1208/scikit-learn | sklearn/covariance/graph_lasso_.py | 127 | 25626 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
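# Added note (not in the original source): the two helpers above are used
# together in the solver loop further down -- ``_objective`` is what gets
# reported/collected per sweep, while the stopping rule is simply
# ``np.abs(_dual_gap(emp_cov, precision_, alpha)) < tol``.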
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
This results from the bound for the all the Lasso that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
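# Illustrative sketch (added comment, not part of the original module): for
# any ``alpha >= alpha_max(emp_cov)`` the penalised precision estimate keeps
# no non-zero off-diagonal entries, so a decreasing grid of penalties can be
# built as
#
#     a_max = alpha_max(emp_cov)
#     alphas = np.logspace(np.log10(1e-2 * a_max), np.log10(a_max), 4)[::-1]
#
# which mirrors what ``GraphLassoCV`` does below when ``alphas`` is an int.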
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
# ill-conditioned, and the CV loop blows. Beside, this takes
# conservative stand-point on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
# The different l1 regression solver have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
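# Illustrative usage sketch for ``graph_lasso`` (added comment, not part of
# the original module; it relies only on names already imported in this file):
#
#     rng = check_random_state(0)
#     X = rng.randn(60, 10)
#     emp_cov = empirical_covariance(X)
#     cov_, prec_ = graph_lasso(emp_cov, alpha=0.2)
#
# Larger ``alpha`` values drive more off-diagonal entries of ``prec_`` to
# exactly zero.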
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
plotted at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
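# Illustrative usage sketch for the estimator above (added comment, not part
# of the original module): ``GraphLasso`` follows the usual scikit-learn
# fit/attribute API.
#
#     model = GraphLasso(alpha=0.05).fit(X)   # X: (n_samples, n_features)
#     model.covariance_                       # regularized covariance estimate
#     model.precision_                        # sparse inverse covariance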
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alphas : integer, or list positive float, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
# Little danse to transform the list in what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
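# Illustrative usage sketch (added comment, not part of the original module):
# the cross-validated variant selects the penalty by refining the alpha grid
# around the best held-out log-likelihood.
#
#     cv_model = GraphLassoCV(alphas=4, n_refinements=4).fit(X)
#     cv_model.alpha_        # penalty selected by cross-validation
#     cv_model.cv_alphas_    # all penalties explored across the refined grids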
| bsd-3-clause |
Arcanewinds/FDL-LunarResources | DataPreparation/LOLA_DEM/img_split_2_latlon.py | 1 | 3086 | #Written by Timothy Seabrook
#[email protected]
#This script is used to refactor the naming convention of LOLA_DEM Small Tiles, into x/y coordinates.
from pathlib import Path
import glob, os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
from osgeo import gdal
from testNAC import long_id, lat_id
thisDir = os.path.dirname(os.path.abspath(__file__))
rootDir = os.path.join(thisDir, os.pardir, os.pardir)
dataDir = os.path.join(rootDir, 'Data')
DEMDir = os.path.join(dataDir, 'LOLA_DEM', 'South_Pole')
DEMLargeDir = os.path.join(DEMDir, 'Large_Tiles')
DEMSmallDir = os.path.join(DEMDir, 'Small_Tiles')
base_folder = "/Volumes/DATA DISK/PDS_FILES/LROC_BigDEM/"
folder_switch = {
0 : 'crater',
1 : 'not crater',
2 : 'not sure',
}
base_filename = "hs-45-45_lola20sp"
full_size = [30400, 30400]
p_size = [3800, 3800]
cut_size = [32, 32]
stride = np.divide(cut_size,2)
for n in (26,):  # single-element tuple so the loop iterates (a bare int is not iterable)
v_pieces = np.floor_divide(full_size[0], p_size[0])
h_pieces = np.floor_divide(full_size[1], p_size[1])
y_ind = np.floor_divide(n, v_pieces)
x_ind = np.mod(n, v_pieces)
y_pos = [0]*2
x_pos = [0]*2
y_pos[0] = np.multiply(p_size[0],y_ind)
x_pos[0] = np.multiply(p_size[1],x_ind)
y_pos[1] = y_pos[0] + p_size[0]
x_pos[1] = x_pos[0] + p_size[1]
    for m in range(len(folder_switch)):
folder_name = folder_switch[m]
        if not os.path.isdir(base_folder + folder_name):
os.mkdir(base_folder + folder_name)
for filename in os.listdir(base_folder + folder_name):
            piece_id = int(filename.split('_cut')[1].split('.')[0])
w_cuts = np.multiply(np.floor_divide(p_size[1], cut_size[1]), np.divide(cut_size[1], stride[1]))
h_cuts = np.multiply(np.floor_divide(p_size[0], cut_size[0]), np.divide(cut_size[0], stride[0]))
y_ind = np.floor_divide(piece_id, w_cuts)
x_ind = np.mod(piece_id, w_cuts)
y_pos = np.multiply(y_ind, stride[0])
x_pos = np.multiply(x_ind, stride[1])
#https://stackoverflow.com/questions/273946/how-do-i-resize-an-image-using-pil-and-maintain-its-aspect-ratio
for n in (25,39,43,57,64):
if not os.path.isdir(base_folder+'p'+str(n+1)+"/"):
os.mkdir(base_folder+'p'+str(n+1)+"/")
curr_filename = base_folder+base_filename+'_p'+str(n+1)+'.tif'
ds = gdal.Open(curr_filename)
im = np.array(ds.GetRasterBand(1).ReadAsArray())
width = im.shape[1]
height = im.shape[0]
w_cuts = np.multiply(np.floor_divide(width, cut_size[1]), np.divide(cut_size[1], stride[1]))
h_cuts = np.multiply(np.floor_divide(height, cut_size[0]), np.divide(cut_size[0], stride[0]))
for i in range(w_cuts):
for j in range(h_cuts):
x_off = np.multiply(i, stride[1])
y_off = np.multiply(j, stride[0])
#image = np.asarray(im)
image = im[y_off:y_off+cut_size[0], x_off:x_off+cut_size[1]]
ind = (i*w_cuts + j)
filename = base_folder+'p'+str(n+1)+"/"+base_filename+'_cut'+str(ind)
im2 = Image.fromarray(image)
im2.save(filename + '.tif') | gpl-3.0 |
gschivley/ERCOT_power | Final report/MarginalUnit.py | 1 | 4417 | from __future__ import division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import gzip
import cPickle as pickle
class Marginal_Unit():
def __init__(self, ercot, allEPA, labeled_plants, eia860Dict, fuel_prices):
# ercot = pd.read_csv(ercotPath, index_col=0)
# epaDict = self.load_zipped_pickle(epaPath)
# labeled_plants = pd.read_csv('Cluster labels.csv')
# eia860Dict = pickle.load(open(eiaPath, "rb"))
# fuel_prices = pd.read_csv(fuelPath, index_col=0)
# ercot = self.process_ercot(ercot)
# allEPA = self.process_epa(epaDict)
group_cluster = self.create_clusters(allEPA, labeled_plants, ercot)
groupCapacity = self.calc_group_capacity(eia860Dict, labeled_plants)
group_cluster = pd.merge(groupCapacity, group_cluster, on=['cluster', 'year'])
group_cluster.loc[:,'Month'] = group_cluster.loc[:,'DATETIME'].apply(lambda x: x.month)
group_cluster = group_cluster.rename(columns = {'year':'Year'})
self.X = pd.merge(group_cluster, fuel_prices, on=['Year', 'Month'])
self.y = self.X.loc[:,['DATETIME', 'cluster', 'Gen Change (MW)']]
self.X.drop('Gen Change (MW)', axis=1, inplace=True)
# def plotClusters(self, clusterData):
# pass
def getX(self):
return self.X
def getY(self):
return self.y
# def process_ercot(self, ercot):
# ercot.loc[:,'Net Load (MW)'] = ercot.loc[:,'ERCOT Load, MW'] - ercot.loc[:,'Total Wind Output, MW']
# ercot.loc[1:,'Net Load Change (MW)'] = ercot.iloc[1:,-1].values - ercot.iloc[:-1,-1].values
# ercot.loc[:,'DATETIME'] = pd.to_datetime(ercot.index)
# return ercot
# def process_epa(self, epaDict):
# allEPA = pd.concat(epaDict)
# allEPA.fillna(0, inplace=True)
# allEPA = self.plant_gen_delta(allEPA)
# allEPA.reset_index(drop=True, inplace=True)
# return allEPA
def create_clusters(self, allEPA, labeled_plants, ercot):
merged_epa_cluster = pd.merge(allEPA, labeled_plants, left_on=['PLANT_ID', 'YEAR'],
right_on=['plant_id', 'year'])
grouped_clusters = merged_epa_cluster.loc[:,['Gen Change (MW)', 'GROSS LOAD (MW)', 'DATETIME', 'cluster']].groupby(['DATETIME', 'cluster']).sum()
grouped_clusters.reset_index(inplace=True)
grouped_clusters['DATETIME'] = pd.to_datetime(grouped_clusters['DATETIME'])
grouped_clusters = pd.merge(grouped_clusters, ercot, on='DATETIME')
grouped_clusters.loc[:,'year'] = grouped_clusters.loc[:,'DATETIME'].apply(lambda x: x.year)
return grouped_clusters
def calc_group_capacity(self, eia860Dict, labeled_plants):
#Add year to as a column
for k in eia860Dict.keys():
eia860Dict[k]["Year"] = k
#Flatten dictionary, rename columns, and do inner join
merged860 = pd.concat(eia860Dict)
merged860.columns = ["plant_id", "nameplate_capacity", "year"]
merged860 = labeled_plants.merge(merged860, on=["plant_id", "year"])
groupCapacity = merged860.loc[:,["cluster", "year", "nameplate_capacity"]].groupby(by=["cluster", "year"]).sum()
groupCapacity.reset_index(inplace=True)
return groupCapacity
# def load_zipped_pickle(self, filename):
# with gzip.open(filename, 'rb') as f:
# loaded_object = pickle.load(f)
# return loaded_object
# def plant_gen_delta(self, df):
# """
# For every plant in the input df, calculate the change in gross load (MW)
# from the previous hour.
# input:
# df: dataframe of EPA clean air markets data
# return:
# df: concatanated list of dataframes
# """
# df_list = []
# for plant in df['PLANT_ID'].unique():
# temp = df.loc[df['PLANT_ID'] == plant,:]
# gen_change = temp.loc[:,'GROSS LOAD (MW)'].values - temp.loc[:,'GROSS LOAD (MW)'].shift(1).values
# temp.loc[:,'Gen Change (MW)'] = gen_change
# df_list.append(temp)
# return pd.concat(df_list) | mit |
kiyoto/statsmodels | statsmodels/graphics/plot_grids.py | 33 | 5711 | '''create scatterplot with confidence ellipsis
Author: Josef Perktold
License: BSD-3
TODO: update script to use sharex, sharey, and visible=False
see http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label
for sharex I need to have the ax of the last_row when editing the earlier
rows. Or you axes_grid1, imagegrid
http://matplotlib.sourceforge.net/mpl_toolkits/axes_grid/users/overview.html
'''
from statsmodels.compat.python import range
import numpy as np
from scipy import stats
from . import utils
__all__ = ['scatter_ellipse']
def _make_ellipse(mean, cov, ax, level=0.95, color=None):
"""Support function for scatter_ellipse."""
from matplotlib.patches import Ellipse
v, w = np.linalg.eigh(cov)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan(u[1]/u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2 * np.sqrt(v * stats.chi2.ppf(level, 2)) #get size corresponding to level
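    # Added note (not in the original source): for a 2-D Gaussian the region
    # {x : x' Sigma^{-1} x <= chi2.ppf(level, 2)} holds `level` of the
    # probability mass, so 2*sqrt(eigenvalue * quantile) gives the full axis
    # lengths handed to the Ellipse patch below.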
ell = Ellipse(mean[:2], v[0], v[1], 180 + angle, facecolor='none',
edgecolor=color,
#ls='dashed', #for debugging
lw=1.5)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
def scatter_ellipse(data, level=0.9, varnames=None, ell_kwds=None,
plot_kwds=None, add_titles=False, keep_ticks=False,
fig=None):
"""Create a grid of scatter plots with confidence ellipses.
ell_kwds, plot_kdes not used yet
looks ok with 5 or 6 variables, too crowded with 8, too empty with 1
Parameters
----------
data : array_like
Input data.
level : scalar, optional
Default is 0.9.
varnames : list of str, optional
Variable names. Used for y-axis labels, and if `add_titles` is True
also for titles. If not given, integers 1..data.shape[1] are used.
ell_kwds : dict, optional
UNUSED
plot_kwds : dict, optional
UNUSED
add_titles : bool, optional
Whether or not to add titles to each subplot. Default is False.
Titles are constructed from `varnames`.
keep_ticks : bool, optional
If False (default), remove all axis ticks.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise `fig` itself.
"""
fig = utils.create_mpl_fig(fig)
import matplotlib.ticker as mticker
data = np.asanyarray(data) #needs mean and cov
nvars = data.shape[1]
if varnames is None:
#assuming single digit, nvars<=10 else use 'var%2d'
varnames = ['var%d' % i for i in range(nvars)]
plot_kwds_ = dict(ls='none', marker='.', color='k', alpha=0.5)
if plot_kwds:
plot_kwds_.update(plot_kwds)
ell_kwds_= dict(color='k')
if ell_kwds:
ell_kwds_.update(ell_kwds)
dmean = data.mean(0)
dcov = np.cov(data, rowvar=0)
for i in range(1, nvars):
#print '---'
ax_last=None
for j in range(i):
#print i,j, i*(nvars-1)+j+1
ax = fig.add_subplot(nvars-1, nvars-1, (i-1)*(nvars-1)+j+1)
## #sharey=ax_last) #sharey doesn't allow empty ticks?
## if j == 0:
## print 'new ax_last', j
## ax_last = ax
## ax.set_ylabel(varnames[i])
#TODO: make sure we have same xlim and ylim
formatter = mticker.FormatStrFormatter('% 3.1f')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatter)
idx = np.array([j,i])
ax.plot(*data[:,idx].T, **plot_kwds_)
if np.isscalar(level):
level = [level]
for alpha in level:
_make_ellipse(dmean[idx], dcov[idx[:,None], idx], ax, level=alpha,
**ell_kwds_)
if add_titles:
ax.set_title('%s-%s' % (varnames[i], varnames[j]))
if not ax.is_first_col():
if not keep_ticks:
ax.set_yticks([])
else:
ax.yaxis.set_major_locator(mticker.MaxNLocator(3))
else:
ax.set_ylabel(varnames[i])
if ax.is_last_row():
ax.set_xlabel(varnames[j])
else:
if not keep_ticks:
ax.set_xticks([])
else:
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
dcorr = np.corrcoef(data, rowvar=0)
dc = dcorr[idx[:,None], idx]
xlim = ax.get_xlim()
ylim = ax.get_ylim()
## xt = xlim[0] + 0.1 * (xlim[1] - xlim[0])
## yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
## if dc[1,0] < 0 :
## yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
## else:
## yt = ylim[1] - 0.2 * (ylim[1] - ylim[0])
yrangeq = ylim[0] + 0.4 * (ylim[1] - ylim[0])
if dc[1,0] < -0.25 or (dc[1,0] < 0.25 and dmean[idx][1] > yrangeq):
yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
else:
yt = ylim[1] - 0.2 * (ylim[1] - ylim[0])
xt = xlim[0] + 0.1 * (xlim[1] - xlim[0])
ax.text(xt, yt, '$\\rho=%0.2f$'% dc[1,0])
for ax in fig.axes:
if ax.is_last_row(): # or ax.is_first_col():
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
if ax.is_first_col():
ax.yaxis.set_major_locator(mticker.MaxNLocator(3))
return fig
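# Illustrative usage sketch (added comment, not part of the original module):
#
#     data = np.random.multivariate_normal(mean=[0., 0., 0.],
#                                          cov=np.eye(3), size=200)
#     fig = scatter_ellipse(data, level=0.9, varnames=['x1', 'x2', 'x3'])
#     fig.savefig('scatter_ellipse.png')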
| bsd-3-clause |
Frankkkkk/arctic | tests/integration/store/test_bitemporal_store.py | 2 | 16482 | '''
Created on 25 Aug 2015
@author: ateng
'''
from datetime import datetime as dt
from mock import patch
from pandas.util.testing import assert_frame_equal
from arctic.date._mktz import mktz
import pandas as pd
from tests.util import read_str_as_pandas
pytest_plugins = ['arctic.fixtures.arctic']
ts1 = read_str_as_pandas(""" sample_dt | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0""")
ts1_update = read_str_as_pandas(""" sample_dt | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0
2012-11-09 17:06:11.040 | 3.5""")
LOCAL_TZ = mktz()
def test_new_ts_read_write(bitemporal_library):
bitemporal_library.update('spam', ts1)
assert_frame_equal(ts1, bitemporal_library.read('spam').data)
def test_read_ts_raw(bitemporal_library):
bitemporal_library.update('spam', ts1, as_of=dt(2015, 5, 1, tzinfo=mktz('UTC')))
assert_frame_equal(bitemporal_library.read('spam', raw=True).data.tz_convert(tz=mktz('UTC'), level=1), read_str_as_pandas(
""" sample_dt | observed_dt | near
2012-09-08 17:06:11.040 | 2015-05-01 | 1.0
2012-10-08 17:06:11.040 | 2015-05-01 | 2.0
2012-10-09 17:06:11.040 | 2015-05-01 | 2.5
2012-11-08 17:06:11.040 | 2015-05-01 | 3.0""", num_index=2).tz_localize(tz=mktz('UTC'), level=1))
def test_write_ts_with_column_name_same_as_observed_dt_ok(bitemporal_library):
ts1 = read_str_as_pandas(""" sample_dt | observed_dt | near
2012-09-08 17:06:11.040 | 2015-1-1 | 1.0
2012-10-08 17:06:11.040 | 2015-1-1 | 2.0
2012-10-09 17:06:11.040 | 2015-1-1 | 2.5
2012-11-08 17:06:11.040 | 2015-1-1 | 3.0""")
bitemporal_library.update('spam', ts1)
assert_frame_equal(ts1, bitemporal_library.read('spam').data)
def test_last_update(bitemporal_library):
bitemporal_library.update('spam', ts1, as_of=dt(2015, 1, 1))
bitemporal_library.update('spam', ts1, as_of=dt(2015, 1, 2))
assert bitemporal_library.read('spam').last_updated == dt(2015, 1, 2, tzinfo=LOCAL_TZ)
def test_existing_ts_update_and_read(bitemporal_library):
bitemporal_library.update('spam', ts1)
bitemporal_library.update('spam', ts1_update[-1:])
assert_frame_equal(ts1_update, bitemporal_library.read('spam').data)
def test_existing_ts_update_existing_data_and_read(bitemporal_library):
bitemporal_library.update('spam', ts1)
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-10-09 17:06:11.040 | 4.2"""))
expected_ts = read_str_as_pandas(""" sample_dt | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 4.2
2012-11-08 17:06:11.040 | 3.0""")
assert_frame_equal(expected_ts, bitemporal_library.read('spam').data)
def test_read_ts_with_historical_update(bitemporal_library):
with patch('arctic.store.bitemporal_store.dt') as mock_dt:
mock_dt.now.return_value = dt(2015, 5, 1)
mock_dt.side_effect = lambda *args, **kwargs: dt(*args, **kwargs)
bitemporal_library.update('spam', ts1)
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-10-09 17:06:11.040 | 4.2"""),
as_of=dt(2015, 5, 2))
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-10-09 17:06:11.040 | 6.6"""),
as_of=dt(2015, 5, 3))
assert_frame_equal(bitemporal_library.read('spam', as_of=dt(2015, 5, 2, 10)).data, read_str_as_pandas(
"""sample_dt | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 4.2
2012-11-08 17:06:11.040 | 3.0"""))
assert_frame_equal(bitemporal_library.read('spam').data, read_str_as_pandas(""" sample_dt | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 6.6
2012-11-08 17:06:11.040 | 3.0"""))
assert_frame_equal(bitemporal_library.read('spam', as_of=dt(2015, 5, 1, 10)).data, ts1)
def test_read_ts_with_historical_update_and_new_row(bitemporal_library):
with patch('arctic.store.bitemporal_store.dt') as mock_dt:
mock_dt.now.return_value = dt(2015, 5, 1)
mock_dt.side_effect = lambda *args, **kwargs: dt(*args, **kwargs)
bitemporal_library.update('spam', ts1)
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-10-09 17:06:11.040 | 4.2
2012-12-01 17:06:11.040 | 100"""),
as_of=dt(2015, 5, 2))
assert_frame_equal(bitemporal_library.read('spam').data, read_str_as_pandas(""" sample_dt | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 4.2
2012-11-08 17:06:11.040 | 3.0
2012-12-01 17:06:11.040 | 100"""))
assert_frame_equal(bitemporal_library.read('spam', as_of=dt(2015, 5, 1, 10)).data, ts1)
def test_insert_new_rows_in_middle_remains_sorted(bitemporal_library):
bitemporal_library.update('spam', ts1)
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-10-09 12:00:00.000 | 30.0
2012-12-01 17:06:11.040 | 100"""))
assert_frame_equal(bitemporal_library.read('spam').data, read_str_as_pandas(""" sample_dt | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 12:00:00.000 | 30.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0
2012-12-01 17:06:11.040 | 100"""))
def test_insert_versions_inbetween_works_ok(bitemporal_library):
bitemporal_library.update('spam', ts1, as_of=dt(2015, 5, 1))
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-12-01 17:06:11.040 | 100"""),
as_of=dt(2015, 5, 10))
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-12-01 17:06:11.040 | 25"""),
as_of=dt(2015, 5, 8))
assert_frame_equal(bitemporal_library.read('spam').data, read_str_as_pandas(""" sample_dt | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0
2012-12-01 17:06:11.040 | 100"""))
assert_frame_equal(bitemporal_library.read('spam', as_of=dt(2015, 5, 9)).data, read_str_as_pandas(
""" sample_dt | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0
2012-12-01 17:06:11.040 | 25"""))
def test_read_ts_raw_all_version_ok(bitemporal_library):
bitemporal_library.update('spam', ts1, as_of=dt(2015, 5, 1, tzinfo=mktz('UTC')))
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-12-01 17:06:11.040 | 25"""),
as_of=dt(2015, 5, 5, tzinfo=mktz('UTC')))
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-11-08 17:06:11.040 | 42"""),
as_of=dt(2015, 5, 3, tzinfo=mktz('UTC')))
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-10-08 17:06:11.040 | 42
2013-01-01 17:06:11.040 | 100"""),
as_of=dt(2015, 5, 10, tzinfo=mktz('UTC')))
assert_frame_equal(bitemporal_library.read('spam', raw=True).data.tz_localize(tz=None, level=1), read_str_as_pandas(
""" sample_dt | observed_dt | near
2012-09-08 17:06:11.040 | 2015-05-01 | 1.0
2012-10-08 17:06:11.040 | 2015-05-01 | 2.0
2012-10-08 17:06:11.040 | 2015-05-10 | 42
2012-10-09 17:06:11.040 | 2015-05-01 | 2.5
2012-11-08 17:06:11.040 | 2015-05-01 | 3.0
2012-11-08 17:06:11.040 | 2015-05-03 | 42
2012-12-01 17:06:11.040 | 2015-05-05 | 25
2013-01-01 17:06:11.040 | 2015-05-10 | 100""", num_index=2))
def test_bitemporal_store_saves_as_of_with_timezone(bitemporal_library):
bitemporal_library.update('spam', ts1, as_of=dt(2015, 5, 1))
df = bitemporal_library.read('spam', raw=True).data
assert all([x[1] == dt(2015, 5, 1, tzinfo=LOCAL_TZ) for x in df.index])
def test_bitemporal_store_read_as_of_timezone(bitemporal_library):
bitemporal_library.update('spam', ts1, as_of=dt(2015, 5, 1, tzinfo=mktz('Europe/London')))
bitemporal_library.update('spam', read_str_as_pandas(""" sample_dt | near
2012-12-01 17:06:11.040 | 25"""),
as_of=dt(2015, 5, 2, tzinfo=mktz('Europe/London')))
df = bitemporal_library.read('spam', as_of=dt(2015, 5, 2, tzinfo=mktz('Asia/Hong_Kong'))).data
assert_frame_equal(df, ts1)
def test_multi_index_ts_read_write(bitemporal_library):
ts = read_str_as_pandas(""" index 1 | index 2 | near
2012-09-08 17:06:11.040 | SPAM Index | 1.0
2012-10-08 17:06:11.040 | SPAM Index | 2.0
2012-10-09 17:06:11.040 | SPAM Index | 2.5
2012-11-08 17:06:11.040 | SPAM Index | 3.0""", num_index=2)
bitemporal_library.update('spam', ts)
assert_frame_equal(ts, bitemporal_library.read('spam').data)
def test_multi_index_ts_read_raw(bitemporal_library):
ts = read_str_as_pandas(""" index 1 | index 2 | near
2012-09-08 17:06:11.040 | SPAM Index | 1.0
2012-10-08 17:06:11.040 | SPAM Index | 2.0
2012-10-09 17:06:11.040 | SPAM Index | 2.5
2012-11-08 17:06:11.040 | SPAM Index | 3.0""", num_index=2)
expected_ts = read_str_as_pandas(""" index 1 | index 2 | observed_dt | near
2012-09-08 17:06:11.040 | SPAM Index | 2015-01-01 | 1.0
2012-10-08 17:06:11.040 | SPAM Index | 2015-01-01 | 2.0
2012-10-09 17:06:11.040 | SPAM Index | 2015-01-01 | 2.5
2012-11-08 17:06:11.040 | SPAM Index | 2015-01-01 | 3.0""", num_index=3)
bitemporal_library.update('spam', ts, as_of=dt(2015, 1, 1))
assert_frame_equal(expected_ts.tz_localize(tz=LOCAL_TZ, level=2), bitemporal_library.read('spam', raw=True).data)
def test_multi_index_update(bitemporal_library):
ts = read_str_as_pandas(""" index 1 | index 2 | near
2012-09-08 17:06:11.040 | SPAM Index | 1.0
2012-09-08 17:06:11.040 | EGG Index | 1.1
2012-10-08 17:06:11.040 | SPAM Index | 2.0
2012-10-08 17:06:11.040 | EGG Index | 2.1
2012-10-09 17:06:11.040 | SPAM Index | 2.5
2012-10-09 17:06:11.040 | EGG Index | 2.6
2012-11-08 17:06:11.040 | SPAM Index | 3.0
2012-11-08 17:06:11.040 | EGG Index | 3.1""", num_index=2)
ts2 = read_str_as_pandas(""" index 1 | index 2 | near
2012-09-08 17:06:11.040 | SPAM Index | 1.2
2012-09-08 17:06:11.040 | EGG Index | 1.6
2012-12-08 17:06:11.040 | SPAM Index | 4.0""", num_index=2)
expected_ts = read_str_as_pandas(""" index 1 | index 2 | near
2012-09-08 17:06:11.040 | EGG Index | 1.6
2012-09-08 17:06:11.040 | SPAM Index | 1.2
2012-10-08 17:06:11.040 | EGG Index | 2.1
2012-10-08 17:06:11.040 | SPAM Index | 2.0
2012-10-09 17:06:11.040 | EGG Index | 2.6
2012-10-09 17:06:11.040 | SPAM Index | 2.5
2012-11-08 17:06:11.040 | EGG Index | 3.1
2012-11-08 17:06:11.040 | SPAM Index | 3.0
2012-12-08 17:06:11.040 | SPAM Index | 4.0""", num_index=2)
bitemporal_library.update('spam', ts, as_of=dt(2015, 1, 1))
bitemporal_library.update('spam', ts2, as_of=dt(2015, 1, 2))
assert_frame_equal(expected_ts, bitemporal_library.read('spam').data)
assert bitemporal_library.read('spam').last_updated == dt(2015, 1, 2, tzinfo=LOCAL_TZ)
| lgpl-2.1 |
mrunibe/MIALab | bin/main_knn.py | 1 | 9624 | """A medical image analysis pipeline.
The pipeline is used for brain tissue segmentation using a k-nearest neighbors (kNN) classifier.
"""
import argparse
import datetime
import os
import sys
import timeit
import SimpleITK as sitk
import numpy as np
from tensorflow.python.platform import app
from sklearn.neighbors import KNeighborsClassifier
from scipy import stats as scipy_stats
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path
# fixes the ModuleNotFoundError when executing main.py in the console after code changes (e.g. git pull)
# somehow pip install does not keep track of packages
import mialab.classifier.decision_forest as df
import mialab.data.conversion as conversion
import mialab.data.structure as structure
import mialab.data.loading as load
import mialab.utilities.file_access_utilities as futil
import mialab.utilities.pipeline_utilities as putil
import mialab.utilities.statistic_utilities as statistics
FLAGS = None # the program flags
IMAGE_KEYS = [structure.BrainImageTypes.T1, structure.BrainImageTypes.T2, structure.BrainImageTypes.GroundTruth] # the list of images we will load
TEST_BATCH_SIZE = 2 # 1..30, the higher the faster but more memory usage
NORMALIZE_FEATURES = False # Normalize feature vectors to mean 0 and std 1
def main(_):
"""Brain tissue segmentation using decision forests.
The main routine executes the medical image analysis pipeline:
- Image loading
- Registration
- Pre-processing
- Feature extraction
        - kNN classifier model building
        - Segmentation using the kNN classifier model on unseen images
- Post-processing of the segmentation
- Evaluation of the segmentation
"""
# load atlas images
putil.load_atlas_images(FLAGS.data_atlas_dir)
print('-' * 5, 'Training...')
# generate a model directory (use datetime to ensure that the directory is empty)
# we need an empty directory because TensorFlow will continue training an existing model if it is not empty
t = datetime.datetime.now().strftime('%Y-%m-%d%H%M%S')
model_dir = os.path.join(FLAGS.model_dir, t)
os.makedirs(model_dir, exist_ok=True)
# crawl the training image directories
crawler = load.FileSystemDataCrawler(FLAGS.data_train_dir,
IMAGE_KEYS,
futil.BrainImageFilePathGenerator(),
futil.DataDirectoryFilter())
data_items = list(crawler.data.items())
train_data_size = len(data_items)
    pre_process_params = {'zscore_pre': True,  # 1 feature
                          'coordinates_feature': False,  # 3 features
                          'intensity_feature': True,  # 1 feature
                          'gradient_intensity_feature': True}  # 2 features
start_time_total_train = timeit.default_timer()
    n_neighbors = 20
batch_data = dict(data_items)
# load images for training and pre-process
images = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
print('pre-processing done')
# generate feature matrix and label vector
data_train = np.concatenate([img.feature_matrix[0] for img in images])
labels_train = np.concatenate([img.feature_matrix[1] for img in images])
if NORMALIZE_FEATURES:
# normalize data (mean 0, std 1)
data_train = scipy_stats.zscore(data_train)
start_time = timeit.default_timer()
    neigh = KNeighborsClassifier(n_neighbors=n_neighbors, weights='distance',
                                 algorithm='auto').fit(data_train, labels_train[:, 0])
print(' Time elapsed:', timeit.default_timer() - start_time, 's')
time_total_train = timeit.default_timer() - start_time_total_train
start_time_total_test = timeit.default_timer()
print('-' * 5, 'Testing...')
result_dir = os.path.join(FLAGS.result_dir, t)
os.makedirs(result_dir, exist_ok=True)
# initialize evaluator
evaluator = putil.init_evaluator(result_dir)
# crawl the training image directories
crawler = load.FileSystemDataCrawler(FLAGS.data_test_dir,
IMAGE_KEYS,
futil.BrainImageFilePathGenerator(),
futil.DataDirectoryFilter())
data_items = list(crawler.data.items())
all_probabilities = None
for batch_index in range(0, len(data_items), TEST_BATCH_SIZE):
# slicing manages out of range; no need to worry
batch_data = dict(data_items[batch_index: batch_index + TEST_BATCH_SIZE])
# load images for testing and pre-process
pre_process_params['training'] = False
images_test = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
images_prediction = []
images_probabilities = []
for img in images_test:
print('-' * 10, 'Testing', img.id_)
start_time = timeit.default_timer()
# probabilities, predictions = forest.predict(img.feature_matrix[0])
features = img.feature_matrix[0]
if NORMALIZE_FEATURES:
features = scipy_stats.zscore(features)
            predictions = neigh.predict(features)
            probabilities = neigh.predict_proba(features)
if all_probabilities is None:
all_probabilities = np.array([probabilities])
else:
all_probabilities = np.concatenate((all_probabilities, [probabilities]), axis=0)
print(' Time elapsed:', timeit.default_timer() - start_time, 's')
# convert prediction and probabilities back to SimpleITK images
image_prediction = conversion.NumpySimpleITKImageBridge.convert(predictions.astype(np.uint8),
img.image_properties)
image_probabilities = conversion.NumpySimpleITKImageBridge.convert(probabilities, img.image_properties)
# evaluate segmentation without post-processing
evaluator.evaluate(image_prediction, img.images[structure.BrainImageTypes.GroundTruth], img.id_)
images_prediction.append(image_prediction)
images_probabilities.append(image_probabilities)
# post-process segmentation and evaluate with post-processing
post_process_params = {'crf_post': True}
images_post_processed = putil.post_process_batch(images_test, images_prediction, images_probabilities,
post_process_params, multi_process=True)
for i, img in enumerate(images_test):
evaluator.evaluate(images_post_processed[i], img.images[structure.BrainImageTypes.GroundTruth],
img.id_ + '-PP')
# save results
sitk.WriteImage(images_prediction[i], os.path.join(result_dir, images_test[i].id_ + '_SEG.mha'), True)
sitk.WriteImage(images_post_processed[i], os.path.join(result_dir, images_test[i].id_ + '_SEG-PP.mha'), True)
time_total_test = timeit.default_timer() - start_time_total_test
# write summary of parameters to results dir
with open(os.path.join(result_dir, 'summary.txt'), 'w') as summary_file:
print('Result dir: {}'.format(result_dir))
print('Result dir: {}'.format(result_dir), file=summary_file)
print('Training data size: {}'.format(train_data_size), file=summary_file)
print('Total training time: {:.1f}s'.format(time_total_train), file=summary_file)
print('Total testing time: {:.1f}s'.format(time_total_test), file=summary_file)
print('Voxel Filter Mask: {}'.format(putil.FeatureExtractor.VOXEL_MASK_FLT), file=summary_file)
print('Normalize Features: {}'.format(NORMALIZE_FEATURES), file=summary_file)
print('kNN', file=summary_file)
print('n_neighbors: {}'.format(n_neighbors), file=summary_file)
stats = statistics.gather_statistics(os.path.join(result_dir, 'results.csv'))
print('Result statistics:', file=summary_file)
print(stats, file=summary_file)
# all_probabilities.astype(np.float16).dump(os.path.join(result_dir, 'all_probabilities.npy'))
if __name__ == "__main__":
"""The program's entry point."""
script_dir = os.path.dirname(sys.argv[0])
parser = argparse.ArgumentParser(description='Medical image analysis pipeline for brain tissue segmentation')
parser.add_argument(
'--model_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, './mia-model')),
help='Base directory for output models.'
)
parser.add_argument(
'--result_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, './mia-result')),
help='Directory for results.'
)
parser.add_argument(
'--data_atlas_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, '../data/atlas')),
help='Directory with atlas data.'
)
parser.add_argument(
'--data_train_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, '../data/train/')),
help='Directory with training data.'
)
parser.add_argument(
'--data_test_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, '../data/test/')),
help='Directory with testing data.'
)
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
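    # Example invocation sketch (paths are placeholders; the flags are the ones defined above):
    #     python main_knn.py --data_train_dir ../data/train/ --data_test_dir ../data/test/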
| apache-2.0 |
siutanwong/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
jmmease/pandas | pandas/tests/io/json/test_normalize.py | 14 | 11514 | import pytest
import numpy as np
import json
import pandas.util.testing as tm
from pandas import compat, Index, DataFrame
from pandas.io.json import json_normalize
from pandas.io.json.normalize import nested_to_record
@pytest.fixture
def deep_nested():
# deeply nested data
return [{'country': 'USA',
'states': [{'name': 'California',
'cities': [{'name': 'San Francisco',
'pop': 12345},
{'name': 'Los Angeles',
'pop': 12346}]
},
{'name': 'Ohio',
'cities': [{'name': 'Columbus',
'pop': 1234},
{'name': 'Cleveland',
'pop': 1236}]}
]
},
{'country': 'Germany',
'states': [{'name': 'Bayern',
'cities': [{'name': 'Munich', 'pop': 12347}]
},
{'name': 'Nordrhein-Westfalen',
'cities': [{'name': 'Duesseldorf', 'pop': 1238},
{'name': 'Koeln', 'pop': 1239}]}
]
}
]
@pytest.fixture
def state_data():
return [
{'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}],
'info': {'governor': 'Rick Scott'},
'shortname': 'FL',
'state': 'Florida'},
{'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}],
'info': {'governor': 'John Kasich'},
'shortname': 'OH',
'state': 'Ohio'}]
class TestJSONNormalize(object):
def test_simple_records(self):
recs = [{'a': 1, 'b': 2, 'c': 3},
{'a': 4, 'b': 5, 'c': 6},
{'a': 7, 'b': 8, 'c': 9},
{'a': 10, 'b': 11, 'c': 12}]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self, state_data):
result = json_normalize(state_data[0], 'counties')
expected = DataFrame(state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties')
expected = []
for rec in state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties', meta='state')
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_empty_array(self):
result = json_normalize([])
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
# GH 14883
result = json_normalize({'A': {'A': 1, 'B': 2}})
expected = DataFrame([[1, 2]], columns=['A.A', 'A.B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({'A': {'A': 1, 'B': 2}}, sep='_')
expected = DataFrame([[1, 2]], columns=['A_A', 'A_B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({'A': {'A': 1, 'B': 2}}, sep=u'\u03c3')
expected = DataFrame([[1, 2]], columns=[u'A\u03c3A', u'A\u03c3B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize(deep_nested, ['states', 'cities'],
meta=['country', ['states', 'name']],
sep='_')
expected = Index(['name', 'pop',
'country', 'states_name']).sort_values()
assert result.columns.sort_values().equals(expected)
def test_more_deeply_nested(self, deep_nested):
result = json_normalize(deep_nested, ['states', 'cities'],
meta=['country', ['states', 'name']])
# meta_prefix={'states': 'state_'})
ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,
'states.name': ['California', 'California', 'Ohio', 'Ohio',
'Bayern', 'Nordrhein-Westfalen',
'Nordrhein-Westfalen'],
'name': ['San Francisco', 'Los Angeles', 'Columbus',
'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],
'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [{'state': 'Florida',
'shortname': 'FL',
'info': {
'governor': 'Rick Scott'
},
'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {
'governor': 'John Kasich'
},
'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}]}]
result = json_normalize(data, 'counties',
['state', 'shortname',
['info', 'governor']])
ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',
'Cuyahoga'],
'state': ['Florida'] * 3 + ['Ohio'] * 2,
'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],
'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,
'population': [12345, 40000, 60000, 1234, 1337]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [{'foo': 'hello',
'bar': 'there',
'data': [{'foo': 'something', 'bar': 'else'},
{'foo': 'something2', 'bar': 'else2'}]}]
with pytest.raises(ValueError):
json_normalize(data, 'data', meta=['foo', 'bar'])
result = json_normalize(data, 'data', meta=['foo', 'bar'],
meta_prefix='meta')
for val in ['metafoo', 'metabar', 'foo', 'bar']:
assert val in result
def test_record_prefix(self, state_data):
result = json_normalize(state_data[0], 'counties')
expected = DataFrame(state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties',
meta='state',
record_prefix='county_')
expected = []
for rec in state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
expected = expected.rename(columns=lambda x: 'county_' + x)
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_non_ascii_key(self):
if compat.PY3:
testjson = (
b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' +
b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
).decode('utf8')
else:
testjson = ('[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]')
testdata = {
u'sub.A': [1, 3],
u'sub.B': [2, 4],
b"\xc3\x9cnic\xc3\xb8de".decode('utf8'): [0, 1]
}
expected = DataFrame(testdata)
result = json_normalize(json.loads(testjson))
tm.assert_frame_equal(result, expected)
class TestNestedToRecord(object):
def test_flat_stays_flat(self):
recs = [dict(flat1=1, flat2=2),
dict(flat1=3, flat2=4),
]
result = nested_to_record(recs)
expected = recs
assert result == expected
def test_one_level_deep_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1}
assert result == expected
def test_nested_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2),
nested=dict(e=dict(c=1, d=2),
d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
assert result == expected
def test_json_normalize_errors(self):
# GH14583: If meta keys are not always present
# a new option to set errors='ignore' has been implemented
i = {
"Trades": [{
"general": {
"tradeid": 100,
"trade_version": 1,
"stocks": [{
"symbol": "AAPL",
"name": "Apple",
"price": "0"
}, {
"symbol": "GOOG",
"name": "Google",
"price": "0"
}
]
}
}, {
"general": {
"tradeid": 100,
"stocks": [{
"symbol": "AAPL",
"name": "Apple",
"price": "0"
}, {
"symbol": "GOOG",
"name": "Google",
"price": "0"
}
]
}
}
]
}
j = json_normalize(data=i['Trades'],
record_path=[['general', 'stocks']],
meta=[['general', 'tradeid'],
['general', 'trade_version']],
errors='ignore')
expected = {'general.trade_version': {0: 1.0, 1: 1.0, 2: '', 3: ''},
'general.tradeid': {0: 100, 1: 100, 2: 100, 3: 100},
'name': {0: 'Apple', 1: 'Google', 2: 'Apple', 3: 'Google'},
'price': {0: '0', 1: '0', 2: '0', 3: '0'},
'symbol': {0: 'AAPL', 1: 'GOOG', 2: 'AAPL', 3: 'GOOG'}}
assert j.fillna('').to_dict() == expected
pytest.raises(KeyError,
json_normalize, data=i['Trades'],
record_path=[['general', 'stocks']],
meta=[['general', 'tradeid'],
['general', 'trade_version']],
errors='raise'
)
| bsd-3-clause |
seckcoder/lang-learn | python/sklearn/sklearn/neighbors/nearest_centroid.py | 6 | 5853 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD Style.
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..utils.validation import check_arrays, atleast2d_or_csr
from ..metrics.pairwise import pairwise_distances
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
`centroids_` : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print clf.predict([[-0.8, -1]])
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf–idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
X, y = check_arrays(X, y, sparse_format="csr")
if sp.issparse(X) and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
classes = np.unique(y)
self.classes_ = classes
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
for i, cur_class in enumerate(classes):
center_mask = y == cur_class
if sp.issparse(X):
center_mask = np.where(center_mask)[0]
self.centroids_[i] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
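            # Shrunken-centroid computation below (Tibshirani et al., 2002), in short:
            #     d_ik  = (centroid_ik - overall_centroid_i) / (m_k * (s_i + s_0))
            #     d'_ik = sign(d_ik) * max(|d_ik| - shrink_threshold, 0)
            #     shrunken centroid_ik = overall_centroid_i + m_k * (s_i + s_0) * d'_ik
            # where s_i is the pooled within-class standard deviation of feature i
            # and s_0 is its median over features.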
dataset_centroid_ = np.array(X.mean(axis=0))[0]
            # Number of samples in each class (classes == cur_class would count each
            # class exactly once instead of its samples, skewing the m_k term below).
            nk = np.array([np.sum(y == cur_class)
                           for cur_class in classes])
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
            # Index centroids by class position, not by raw label value.
            y_ind = np.searchsorted(classes, y)
            variance = np.array(np.power(X - self.centroids_[y_ind], 2))
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation = np.multiply(deviation, signs)
# Now adjust the centroids using the deviation
msd = np.multiply(ms, deviation)
self.centroids_ = np.array([dataset_centroid_ + msd[i]
for i in xrange(n_classes)])
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
X = atleast2d_or_csr(X)
if not hasattr(self, "centroids_"):
raise AttributeError("Model has not been trained yet.")
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
| unlicense |
moutai/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
RabadanLab/MITKats | Modules/Biophotonics/python/iMC/scripts/ipcai2016/script_analyze_ipcai_in_vivo_small_bowel.py | 6 | 9880 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 11:09:18 2015
@author: wirkert
"""
import Image
import ImageEnhance
import logging
import datetime
import SimpleITK as sitk
import matplotlib
from msi.io.nrrdreader import NrrdReader
import msi.normalize as norm
from regression.estimation import estimate_image
from tasks_common import *
import commons
from msi.io.tiffringreader import TiffRingReader
TiffRingReader.RESIZE_FACTOR = 0.5
sc = commons.ScriptCommons()
sc.add_dir("SMALL_BOWEL_DATA",
os.path.join(sc.get_dir("DATA_FOLDER"), "small_bowel_images"))
sc.add_dir("SMALL_BOWEL_RESULT", os.path.join(sc.get_dir("RESULTS_FOLDER"),
"small_bowel"))
sc.add_dir("FILTER_TRANSMISSIONS",
os.path.join(sc.get_dir("DATA_FOLDER"),
"filter_transmissions"))
font = {'family' : 'normal',
'size' : 25}
matplotlib.rc('font', **font)
class ResultsFile(luigi.Task):
def output(self):
return luigi.LocalTarget(os.path.join(
sc.get_full_dir("SMALL_BOWEL_RESULT"), "results.csv"))
class OxyOverTimeTask(luigi.Task):
def output(self):
return luigi.LocalTarget(os.path.join(
sc.get_full_dir("SMALL_BOWEL_RESULT"),
"colon_oxy_over_time.pdf"))
def requires(self):
return ResultsFile()
def run(self):
df = pd.read_csv(self.input().path, index_col=0)
# determine times from start:
image_name_strings = df["image name"].values
time_strings = map(lambda s: s[
s.find("2015-10-08_")+11:s.find("2015-10-08_")+19],
image_name_strings)
time_in_s = map(lambda s: int(s[0:2]) * 3600 +
int(s[3:5]) * 60 +
int(s[6:]), time_strings)
df["time since first frame [s]"] = np.array(time_in_s) - time_in_s[0]
df["time since applying first clip [s]"] = df["time since first frame [s]"] - 4
# print oxy over time as scatterplot.
plt.figure()
ax = df.plot.scatter(x="time since applying first clip [s]",
y="oxygenation mean [%]", fontsize=30,
s=50, alpha=0.5)
ax.set_xlim((-5, 600))
plt.axvline(x=0, ymin=0, ymax=1, linewidth=2)
plt.axvline(x=39, ymin=0, ymax=1, linewidth=2)
plt.axvline(x=100, ymin=0, ymax=1, linewidth=2)
plt.axvline(x=124, ymin=0, ymax=1, linewidth=2)
ax.annotate('1', xy=(0, ax.get_ylim()[1]),
fontsize=18,
color="blue",
xycoords='data', xytext=(-5, 0),
textcoords='offset points')
ax.annotate('2', xy=(39, ax.get_ylim()[1]),
fontsize=18,
color="blue",
xycoords='data', xytext=(-5, 0),
textcoords='offset points')
ax.annotate('3', xy=(100, ax.get_ylim()[1]),
fontsize=18,
color="blue",
xycoords='data', xytext=(-5, 0),
textcoords='offset points')
ax.annotate('4', xy=(124, ax.get_ylim()[1]),
fontsize=18,
color="blue",
xycoords='data', xytext=(-5, 0),
textcoords='offset points')
plt.grid()
df.to_csv(self.input().path)
# save
plt.savefig(self.output().path,
dpi=250, bbox_inches='tight', mode="pdf")
def plot_image(image, axis):
im2 = axis.imshow(image, interpolation='nearest', alpha=1.0)
# axis.set_title(title, fontsize=5)
# divider = make_axes_locatable(axis)
# cax = divider.append_axes("right", size="10%", pad=0.05)
# cbar = plt.colorbar(im2, cax=cax)
# cbar.ax.tick_params(labelsize=5)
axis.xaxis.set_visible(False)
class IPCAICreateOxyImageTask(luigi.Task):
image_name = luigi.Parameter()
df_prefix = luigi.Parameter()
def requires(self):
return IPCAITrainRegressor(df_prefix=self.df_prefix), \
Flatfield(flatfield_folder=sc.get_full_dir("FLAT_FOLDER")), \
SingleMultispectralImage(image=self.image_name), \
Dark(dark_folder=sc.get_full_dir("DARK_FOLDER"))
def output(self):
return luigi.LocalTarget(os.path.join(sc.get_full_dir("SMALL_BOWEL_RESULT"),
os.path.split(self.image_name)[1] +
"_" + self.df_prefix +
"_summary" + ".png"))
def run(self):
nrrd_reader = NrrdReader()
tiff_ring_reader = TiffRingReader()
# read the flatfield
flat = nrrd_reader.read(self.input()[1].path)
dark = nrrd_reader.read(self.input()[3].path)
# read the msi
nr_filters = len(sc.other["RECORDED_WAVELENGTHS"])
msi, segmentation = tiff_ring_reader.read(self.input()[2].path,
nr_filters)
# only take into account not saturated pixels.
segmentation = np.logical_and(segmentation,
(np.max(msi.get_image(),
axis=-1) < 1000.))
# read the regressor
e_file = open(self.input()[0].path, 'r')
e = pickle.load(e_file)
# correct image setup
filter_nr = int(self.image_name[-6:-5])
original_order = np.arange(nr_filters)
new_image_order = np.concatenate((
original_order[nr_filters - filter_nr:],
original_order[:nr_filters - filter_nr]))
# resort msi to restore original order
msimani.get_bands(msi, new_image_order)
# correct by flatfield
msimani.image_correction(msi, flat, dark)
# create artificial rgb
rgb_image = msi.get_image()[:, :, [2, 3, 1]]
rgb_image /= np.max(rgb_image)
rgb_image *= 255.
# preprocess the image
# sortout unwanted bands
print "1"
# zero values would lead to infinity logarithm, thus clip.
msi.set_image(np.clip(msi.get_image(), 0.00001, 2. ** 64))
# normalize to get rid of lighting intensity
norm.standard_normalizer.normalize(msi)
# transform to absorption
msi.set_image(-np.log(msi.get_image()))
# normalize by l2 for stability
norm.standard_normalizer.normalize(msi, "l2")
print "2"
# estimate
sitk_image, time = estimate_image(msi, e)
image = sitk.GetArrayFromImage(sitk_image)
plt.figure()
print "3"
rgb_image = rgb_image.astype(np.uint8)
im = Image.fromarray(rgb_image, 'RGB')
enh_brightness = ImageEnhance.Brightness(im)
im = enh_brightness.enhance(10.)
plotted_image = np.array(im)
top_left_axis = plt.gca()
top_left_axis.imshow(plotted_image, interpolation='nearest')
top_left_axis.xaxis.set_visible(False)
top_left_axis.yaxis.set_visible(False)
plt.set_cmap("jet")
print "4"
# plot parametric maps
segmentation[0, 0] = 1
segmentation[0, 1] = 1
oxy_image = np.ma.masked_array(image[:, :, 0], ~segmentation)
oxy_image[np.isnan(oxy_image)] = 0.
oxy_image[np.isinf(oxy_image)] = 0.
oxy_mean = np.mean(oxy_image)
oxy_image[0, 0] = 0.0
oxy_image[0, 1] = 1.
plot_image(oxy_image[:, :], plt.gca())
df_image_results = pd.DataFrame(data=np.expand_dims([self.image_name,
oxy_mean * 100.,
time], 0),
columns=["image name",
"oxygenation mean [%]",
"time to estimate"])
results_file = os.path.join(sc.get_full_dir("SMALL_BOWEL_RESULT"),
"results.csv")
if os.path.isfile(results_file):
df_results = pd.read_csv(results_file, index_col=0)
df_results = pd.concat((df_results, df_image_results)).reset_index(
drop=True
)
else:
df_results = df_image_results
df_results.to_csv(results_file)
plt.savefig(self.output().path,
dpi=250, bbox_inches='tight')
plt.close("all")
if __name__ == '__main__':
# create a folder for the results if necessary
sc.set_root("/media/wirkert/data/Data/2016_02_02_IPCAI/")
sc.create_folders()
# root folder there the data lies
logging.basicConfig(filename=os.path.join(sc.get_full_dir("LOG_FOLDER"),
"small_bowel_images" +
str(datetime.datetime.now()) +
'.log'),
level=logging.INFO)
luigi.interface.setup_interface_logging()
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger = logging.getLogger()
logger.addHandler(ch)
sch = luigi.scheduler.CentralPlannerScheduler()
w = luigi.worker.Worker(scheduler=sch)
# determine files to process
files = get_image_files_from_folder(sc.get_full_dir("SMALL_BOWEL_DATA"),
suffix="F0.tiff", fullpath=True)
for f in files:
main_task = IPCAICreateOxyImageTask(image_name=f,
df_prefix="ipcai_revision_colon_mean_scattering_te")
w.add(main_task)
w.run()
oxygenation_over_time_task = OxyOverTimeTask()
w.add(oxygenation_over_time_task)
w.run()
| bsd-3-clause |
penelopy/luigi | examples/pyspark_wc.py | 56 | 3361 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.s3 import S3Target
from luigi.contrib.spark import SparkSubmitTask, PySparkTask
class InlinePySparkWordCount(PySparkTask):
"""
This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
# py-packages: numpy, pandas
"""
driver_memory = '2g'
executor_memory = '3g'
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
def main(self, sc, *args):
sc.textFile(self.input().path) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(self.output().path)
class PySparkWordCount(SparkSubmitTask):
"""
This task is the same as :py:class:`InlinePySparkWordCount` above but uses
an external python driver file specified in :py:meth:`app`
It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
deploy-mode: client
"""
driver_memory = '2g'
executor_memory = '3g'
total_executor_cores = luigi.IntParameter(default=100)
name = "PySpark Word Count"
app = 'wordcount.py'
def app_options(self):
# These are passed to the Spark main args in the defined order.
return [self.input().path, self.output().path]
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
'''
// Corresponding example Spark Job, running Word count with Spark's Python API
// This file would have to be saved into wordcount.py
import sys
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext()
sc.textFile(sys.argv[1]) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(sys.argv[2])
'''
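# Hedged launch sketch (the module path below is an assumption; adjust to your layout):
#     luigi --module examples.pyspark_wc InlinePySparkWordCount --local-scheduler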
| apache-2.0 |
huobaowangxi/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
    # The data has a single dominant (rank-1) latent direction plus isotropic noise,
    # so the assessed log-likelihood should be (near-)maximal for one component.
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
    # Mean shifts on two disjoint sample subsets give an effective dimensionality
    # of at least two, which _infer_dimension_ should detect.
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
rhiever/bokeh | bokeh/mplexporter/exporter.py | 11 | 12080 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
from __future__ import absolute_import
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
        Parameters
        ----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
else:
warnings.warn("Legend element %s not impemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not impemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] in ['None', 'none', None]:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
processed_paths = [utils.SVG_path(path) for path in paths]
path_coords, tr = self.process_transform(
transform, ax, return_trans=True, force_trans=force_pathtrans)
processed_paths = [(tr.transform(path[0]), path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
| bsd-3-clause |
khkaminska/bokeh | bokeh/session.py | 42 | 20253 | ''' The session module provides the Session class, which encapsulates a
connection to a Document that resides on a Bokeh server.
The Session class provides methods for creating, loading and storing
documents and objects, as well as methods for user authentication. These
are useful when the server is run in multi-user mode.
'''
from __future__ import absolute_import, print_function
#--------
# logging
#--------
import logging
logger = logging.getLogger(__name__)
#-------------
# standard lib
#-------------
import time
import json
from os import makedirs
from os.path import expanduser, exists, join
import tempfile
#------------
# third party
#------------
from six.moves.urllib.parse import urlencode
from requests.exceptions import ConnectionError
#---------
# optional
#---------
try:
import pandas as pd
import tables
has_pandas = True
except ImportError as e:
has_pandas = False
#--------
# project
#--------
from . import browserlib
from . import protocol
from .embed import autoload_server
from .exceptions import DataIntegrityException
from .util.notebook import publish_display_data
from .util.serialization import dump, get_json, urljoin
DEFAULT_SERVER_URL = "http://localhost:5006/"
class Session(object):
""" Encapsulate a connection to a document stored on a Bokeh Server.
Args:
name (str, optional) : name of server
root_url (str, optional) : root url of server
userapikey (str, optional) : (default: "nokey")
username (str, optional) : (default: "defaultuser")
load_from_config (bool, optional) :
Whether to load login information from config. (default: True)
If False, then we may overwrite the user's config.
configdir (str) : location of user configuration information
Attributes:
base_url (str) :
configdir (str) :
configfile (str) :
http_session (requests.session) :
userapikey (str) :
userinfo (dict) :
username (str) :
"""
def __init__(
self,
name = DEFAULT_SERVER_URL,
root_url = DEFAULT_SERVER_URL,
userapikey = "nokey",
username = "defaultuser",
load_from_config = True,
configdir = None,
):
self.name = name
if not root_url.endswith("/"):
logger.warning("root_url should end with a /, adding one")
root_url = root_url + "/"
self.root_url = root_url
# single user mode case
self.userapikey = userapikey
self.username = username
self._configdir = None
if configdir:
self.configdir = configdir
if load_from_config:
self.load()
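    # A minimal construction sketch (hypothetical document title; assumes a
    # bokeh-server is reachable at DEFAULT_SERVER_URL):
    #
    #     session = Session(name="demo", load_from_config=False)
    #     session.use_doc("demo-document")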
@property
def http_session(self):
if hasattr(self, "_http_session"):
return self._http_session
else:
import requests
self._http_session = requests.session()
return self._http_session
@property
def username(self):
return self.http_session.headers.get('BOKEHUSER')
@username.setter
def username(self, val):
self.http_session.headers.update({'BOKEHUSER': val})
@property
def userapikey(self):
return self.http_session.headers.get('BOKEHUSER-API-KEY')
@userapikey.setter
def userapikey(self, val):
self.http_session.headers.update({'BOKEHUSER-API-KEY': val})
@property
def configdir(self):
""" filename where our config are stored. """
if self._configdir:
return self._configdir
bokehdir = join(expanduser("~"), ".bokeh")
if not exists(bokehdir):
makedirs(bokehdir)
return bokehdir
# for testing
@configdir.setter
def configdir(self, path):
self._configdir = path
@property
def configfile(self):
return join(self.configdir, "config.json")
def load_dict(self):
configfile = self.configfile
if not exists(configfile):
data = {}
else:
with open(configfile, "r") as f:
data = json.load(f)
return data
def load(self):
""" Loads the server configuration information from disk
Returns:
None
"""
config_info = self.load_dict().get(self.name, {})
print("Using saved session configuration for %s" % self.name)
print("To override, pass 'load_from_config=False' to Session")
self.root_url = config_info.get('root_url', self.root_url)
self.userapikey = config_info.get('userapikey', self.userapikey)
self.username = config_info.get('username', self.username)
def save(self):
""" Save the server configuration information to JSON
Returns:
None
"""
data = self.load_dict()
data[self.name] = {'root_url': self.root_url,
'userapikey': self.userapikey,
'username': self.username}
configfile = self.configfile
with open(configfile, "w+") as f:
json.dump(data, f)
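    # load()/save() round-trip through ~/.bokeh/config.json, keyed by session
    # name; a saved file might look roughly like this (illustrative values):
    #
    #     {"demo": {"root_url": "http://localhost:5006/",
    #               "userapikey": "nokey",
    #               "username": "defaultuser"}}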
def register(self, username, password):
''' Register a new user with a bokeh server.
.. note::
This is useful in multi-user mode.
Args:
username (str) : user name to register
password (str) : user password for account
Returns:
None
'''
url = urljoin(self.root_url, "bokeh/register")
result = self.execute('post', url, data={
'username': username,
'password': password,
'api': 'true'
})
if result.status_code != 200:
raise RuntimeError("Unknown Error")
result = get_json(result)
if result['status']:
self.username = username
self.userapikey = result['userapikey']
self.save()
else:
raise RuntimeError(result['error'])
def login(self, username, password):
''' Log a user into a bokeh server.
.. note::
This is useful in multi-user mode.
Args:
username (str) : user name to log in
password (str) : user password
Returns:
None
'''
url = urljoin(self.root_url, "bokeh/login")
result = self.execute('post', url, data={
'username': username,
'password': password,
'api': 'true'
})
if result.status_code != 200:
raise RuntimeError("Unknown Error")
result = get_json(result)
if result['status']:
self.username = username
self.userapikey = result['userapikey']
self.save()
else:
raise RuntimeError(result['error'])
self.save()
def browser_login(self):
""" Open a browser with a token that logs the user into a bokeh server.
.. note::
This is useful in multi-user mode.
        Returns:
None
"""
controller = browserlib.get_browser_controller()
url = urljoin(self.root_url, "bokeh/loginfromapikey")
url += "?" + urlencode({'username': self.username,
'userapikey': self.userapikey})
controller.open(url)
def data_source(self, name, data):
""" Makes and uploads a server data source to the server.
.. note::
The server must be configured with a data directory.
Args:
name (str) : name for the data source object
data (pd.DataFrame or np.array) : data to upload
Returns:
a ServerDataSource
"""
raise NotImplementedError
def list_data(self):
""" Return all the data soruces on the server.
Returns:
sources : JSON
"""
raise NotImplementedError
def publish(self):
url = urljoin(self.root_url, "/bokeh/%s/publish" % self.docid)
self.post_json(url)
def execute(self, method, url, headers=None, **kwargs):
""" Execute an HTTP request using the current session.
Returns the response
Args:
method (string) : 'get' or 'post'
url (string) : url
headers (dict, optional) : any extra HTTP headers
Keyword Args:
Any extra arguments to pass into the requests library
Returns:
response
"""
import requests
import warnings
func = getattr(self.http_session, method)
try:
resp = func(url, headers=headers, **kwargs)
except requests.exceptions.ConnectionError as e:
warnings.warn("You need to start the bokeh-server to see this example.")
raise e
if resp.status_code == 409:
raise DataIntegrityException
if resp.status_code == 401:
raise Exception('HTTP Unauthorized accessing')
return resp
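    # Illustrative low-level call (same endpoint the userinfo property below
    # uses); the higher-level get_json/post_json helpers wrap this:
    #
    #     resp = session.execute('get', urljoin(session.root_url, 'bokeh/userinfo/'))
    #     info = get_json(resp)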
def execute_json(self, method, url, headers=None, **kwargs):
""" same as execute, except ensure that json content-type is
set in headers and interprets and returns the json response
"""
if headers is None:
headers = {}
headers['content-type'] = 'application/json'
resp = self.execute(method, url, headers=headers, **kwargs)
return get_json(resp)
def get_json(self, url, headers=None, **kwargs):
""" Return the result of an HTTP 'get'.
Args:
url (str) : the URL for the 'get' request
headers (dict, optional) : any extra HTTP headers
Keyword Args:
Any extra arguments to pass into the requests library
Returns:
response: JSON
"""
return self.execute_json('get', url, headers=headers, **kwargs)
def post_json(self, url, headers=None, **kwargs):
""" Return the result of an HTTP 'post'
Args:
            url (str) : the URL for the 'post' request
headers (dict, optional) : any extra HTTP headers
Keyword Args:
Any extra arguments to pass into the requests library
Returns:
response: JSON
"""
return self.execute_json('post', url, headers=headers, **kwargs)
@property
def userinfo(self):
if not hasattr(self, "_userinfo"):
url = urljoin(self.root_url, 'bokeh/userinfo/')
self._userinfo = self.get_json(url)
return self._userinfo
@userinfo.setter
def userinfo(self, val):
self._userinfo = val
@property
def base_url(self):
return urljoin(self.root_url, "bokeh/bb/")
def get_api_key(self, docid):
""" Retrieve the document API key from the server.
Args:
            docid (string) : docid of the document to retrieve the API key for
Returns:
apikey : string
"""
url = urljoin(self.root_url,"bokeh/getdocapikey/%s" % docid)
apikey = self.get_json(url)
if 'apikey' in apikey:
apikey = apikey['apikey']
logger.info('got read write apikey')
else:
apikey = apikey['readonlyapikey']
logger.info('got read only apikey')
return apikey
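    # The server replies with either a read-write or a read-only key, e.g.
    # (illustrative JSON): {"apikey": "..."} or {"readonlyapikey": "..."};
    # a typical call is simply:
    #
    #     apikey = session.get_api_key(session.docid)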
def find_doc(self, name):
""" Return the docid of the document with a title matching ``name``.
.. note::
Creates a new document with the given title if one is not found.
Args:
name (string) : name for the document
Returns:
docid : str
"""
docs = self.userinfo.get('docs')
matching = [x for x in docs if x.get('title') == name]
if len(matching) == 0:
logger.info("No documents found, creating new document '%s'" % name)
self.make_doc(name)
return self.find_doc(name)
elif len(matching) > 1:
logger.warning("Multiple documents with name '%s'" % name)
return matching[0]['docid']
def use_doc(self, name=None, docid=None):
""" Configure the session to use a given document.
Args:
name (str, optional) : name of the document to use
docid (str, optional) : id of the document to use
.. note::
only one of ``name`` or ``docid`` may be supplied.
            Creates a document with the given name if one is not present on
the server.
Returns:
None
"""
if docid is not None and name is not None:
raise ValueError("only one of 'name' or 'docid' can be supplied to use_doc(...)")
if docid:
self.docid = docid
else:
self.docid = self.find_doc(name)
self.apikey = self.get_api_key(self.docid)
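    # Typical flow (hypothetical document title); after this call both
    # self.docid and self.apikey are populated:
    #
    #     session.use_doc("sensor-dashboard")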
def make_doc(self, title):
""" Makes a new document with the given title on the server
.. note:: user information is reloaded
Returns:
None
"""
url = urljoin(self.root_url,"bokeh/doc/")
data = protocol.serialize_json({'title' : title})
self.userinfo = self.post_json(url, data=data)
def pull(self, typename=None, objid=None):
""" Pull JSON objects from the server.
Returns a specific object if both ``typename`` and ``objid`` are
supplied. Otherwise, returns all objects for the currently configured
document.
This is a low-level function.
Args:
typename (str, optional) : name of the type of object to pull
objid (str, optional) : ID of the object to pull
.. note::
you must supply either ``typename`` AND ``objid`` or omit both.
Returns:
attrs : JSON
"""
if typename is None and objid is None:
url = urljoin(self.base_url, self.docid +"/")
attrs = self.get_json(url)
elif typename is None or objid is None:
raise ValueError("typename and objid must both be None, or neither.")
else:
url = urljoin(
self.base_url,
self.docid + "/" + typename + "/" + objid + "/"
)
attr = self.get_json(url)
attrs = [{
'type': typename,
'id': objid,
'attributes': attr
}]
return attrs
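    # Pulling a single object returns a one-element list in the same shape the
    # bulk pull uses (hypothetical type name and id):
    #
    #     attrs = session.pull(typename="Plot", objid="abc123")
    #     # -> [{'type': 'Plot', 'id': 'abc123', 'attributes': {...}}]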
def push(self, *jsonobjs):
""" Push JSON objects to the server.
This is a low-level function.
Args:
*jsonobjs (JSON) : objects to push to the server
Returns:
None
"""
data = protocol.serialize_json(jsonobjs)
url = urljoin(self.base_url, self.docid + "/", "bulkupsert")
self.post_json(url, data=data)
def gc(self):
url = urljoin(self.base_url, self.docid + "/", "gc")
self.post_json(url)
# convenience functions to use a session and store/fetch from server
def load_document(self, doc):
""" Loads data for the session and merge with the given document.
Args:
doc (Document) : document to load data into
Returns:
None
"""
self.gc()
json_objs = self.pull()
doc.merge(json_objs)
doc.docid = self.docid
def load_object(self, obj, doc):
""" Update an object in a document with data pulled from the server.
Args:
obj (PlotObject) : object to be updated
doc (Document) : the object's document
Returns:
None
"""
assert obj._id in doc._models
attrs = self.pull(typename=obj.__view_model__, objid=obj._id)
doc.load(*attrs)
def store_document(self, doc, dirty_only=True):
""" Store a document on the server.
Returns the models that were actually pushed.
Args:
doc (Document) : the document to store
dirty_only (bool, optional) : whether to store only dirty objects. (default: True)
Returns:
models : list[PlotObject]
"""
doc._add_all()
models = doc._models.values()
if dirty_only:
models = [x for x in models if getattr(x, '_dirty', False)]
json_objs = doc.dump(*models)
self.push(*json_objs)
for model in models:
model._dirty = False
return models
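    # Only models flagged _dirty are pushed by default (sketch, assuming `doc`
    # is an existing Document bound to this session):
    #
    #     changed = session.store_document(doc)                  # dirty only
    #     everything = session.store_document(doc, dirty_only=False)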
def store_objects(self, *objs, **kwargs):
""" Store objects on the server
Returns the objects that were actually stored.
Args:
*objs (PlotObject) : objects to store
Keywords Args:
dirty_only (bool, optional) : whether to store only dirty objects. (default: True)
Returns:
models : set[PlotObject]
"""
models = set()
for obj in objs:
models.update(obj.references())
if kwargs.pop('dirty_only', True):
models = list(models)
json_objs = dump(models, self.docid)
self.push(*json_objs)
for model in models:
model._dirty = False
return models
def object_link(self, obj):
""" Return a URL to a server page that will render the given object.
Args:
obj (PlotObject) : object to render
Returns:
URL string
"""
link = "bokeh/doc/%s/%s" % (self.docid, obj._id)
return urljoin(self.root_url, link)
def show(self, obj):
""" Display an object as HTML in IPython using its display protocol.
Args:
obj (PlotObject) : object to display
Returns:
None
"""
data = {'text/html': autoload_server(obj, self)}
publish_display_data(data)
def poll_document(self, document, interval=0.5):
""" Periodically ask the server for updates to the `document`. """
try:
while True:
self.load_document(document)
time.sleep(interval)
except KeyboardInterrupt:
print()
except ConnectionError:
print("Connection to bokeh-server was terminated")
# helper methods
def _prep_data_source_df(self, name, dataframe):
name = tempfile.NamedTemporaryFile(prefix="bokeh_data",
suffix=".pandas").name
store = pd.HDFStore(name)
store.append("__data__", dataframe, format="table", data_columns=True)
store.close()
return name
def _prep_data_source_numpy(self, name, arr):
name = tempfile.NamedTemporaryFile(prefix="bokeh_data",
suffix=".table").name
store = tables.File(name, 'w')
store.createArray("/", "__data__", obj=arr)
store.close()
return name
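    # Both helpers write data to a temporary HDF5 file and return its path; the
    # `name` argument is currently ignored in favour of the tempfile name
    # (sketch, assuming the optional pandas/tables imports above succeeded):
    #
    #     path = session._prep_data_source_df("unused", pd.DataFrame({"x": [1, 2]}))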
class TestSession(Session):
"""Currently, register and login do not work, everything else should work
in theory, but we'll have to test this as we go along and convert tests
"""
def __init__(self, *args, **kwargs):
if 'load_from_config' not in kwargs:
kwargs['load_from_config'] = False
self.client = kwargs.pop('client')
self.headers = {}
super(TestSession, self).__init__(*args, **kwargs)
@property
def username(self):
return self.headers.get('BOKEHUSER')
@username.setter
def username(self, val):
self.headers.update({'BOKEHUSER': val})
@property
def userapikey(self):
return self.headers.get('BOKEHUSER-API-KEY')
@userapikey.setter
def userapikey(self, val):
self.headers.update({'BOKEHUSER-API-KEY': val})
def execute(self, method, url, headers=None, **kwargs):
if headers is None:
headers = {}
func = getattr(self.client, method)
resp = func(url, headers=headers, **kwargs)
if resp.status_code == 409:
raise DataIntegrityException
if resp.status_code == 401:
raise Exception('HTTP Unauthorized accessing')
return resp
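    # Intended test usage (hypothetical WSGI/Flask-style test client exposing
    # .get/.post the way requests does):
    #
    #     session = TestSession(client=app.test_client(), name="test")
    #     resp = session.execute('get', '/bokeh/userinfo/')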
| bsd-3-clause |